From 6707ba0105a2d350710bc0a537a98f49eb4b895d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 29 Mar 2018 00:06:10 +0200 Subject: ath10k: avoid possible string overflow The way that 'strncat' is used here raised a warning in gcc-8: drivers/net/wireless/ath/ath10k/wmi.c: In function 'ath10k_wmi_tpc_stats_final_disp_tables': drivers/net/wireless/ath/ath10k/wmi.c:4649:4: error: 'strncat' output truncated before terminating nul copying as many bytes from a string as its length [-Werror=stringop-truncation] Effectively, this is simply a strcat() but the use of strncat() suggests some form of overflow check. Regardless of whether this might actually overflow, using strlcat() instead of strncat() avoids the warning and makes the code more robust. Fixes: bc64d05220f3 ("ath10k: debugfs support to get final TPC stats for 10.4 variants") Signed-off-by: Arnd Bergmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index c5e1ca5945db..80a80830b363 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4357,7 +4357,7 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar, rate_code[i], type); snprintf(buff, sizeof(buff), "%8d ", tpc[j]); - strncat(tpc_value, buff, strlen(buff)); + strlcat(tpc_value, buff, sizeof(tpc_value)); } tpc_stats->tpc_table[type].pream_idx[i] = pream_idx; tpc_stats->tpc_table[type].rate_code[i] = rate_code[i]; @@ -4694,7 +4694,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, rate_code[i], type, pream_idx); snprintf(buff, sizeof(buff), "%8d ", tpc[j]); - strncat(tpc_value, buff, strlen(buff)); + strlcat(tpc_value, buff, sizeof(tpc_value)); } tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx; tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i]; -- cgit v1.2.3 From db5a4d5e1073be452645d7021eeaf807d70325a2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 28 Mar 2018 18:40:27 +0100 Subject: wil6210: fix potential null dereference of ndev before null check The pointer ndev is being dereferenced before it is being null checked, hence there is a potential null pointer deference. 
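As a generic illustration of the dereference-before-NULL-check pattern involved here (not the driver code; the structure and field names below are made up), the problem and its fix look roughly like this:

    #include <stddef.h>

    struct wdev;                                         /* stands in for wireless_dev */
    struct ndev { struct wdev *ieee80211_ptr; };         /* stands in for net_device */

    /* Buggy shape: the pointer is dereferenced before it is checked. */
    static struct wdev *get_wdev_bad(struct ndev *ndev)
    {
            struct wdev *wdev = ndev->ieee80211_ptr;     /* dereference happens first */

            if (!ndev)                                   /* check comes too late */
                    return NULL;
            return wdev;
    }

    /* Fixed shape: check first, dereference only afterwards. */
    static struct wdev *get_wdev_good(struct ndev *ndev)
    {
            struct wdev *wdev;

            if (!ndev)
                    return NULL;
            wdev = ndev->ieee80211_ptr;
            return wdev;
    }
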
Fix this by only dereferencing ndev after it has been null checked Detected by CoverityScan, CID#1467010 ("Dereference before null check") Fixes: e00243fab84b ("wil6210: infrastructure for multiple virtual interfaces") Signed-off-by: Colin Ian King Reviewed-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index a4b413e8d55a..82aec6b06d09 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -391,7 +391,7 @@ static void wil_fw_error_worker(struct work_struct *work) struct wil6210_priv *wil = container_of(work, struct wil6210_priv, fw_error_worker); struct net_device *ndev = wil->main_ndev; - struct wireless_dev *wdev = ndev->ieee80211_ptr; + struct wireless_dev *wdev; wil_dbg_misc(wil, "fw error worker\n"); @@ -399,6 +399,7 @@ static void wil_fw_error_worker(struct work_struct *work) wil_info(wil, "No recovery - interface is down\n"); return; } + wdev = ndev->ieee80211_ptr; /* increment @recovery_count if less then WIL6210_FW_RECOVERY_TO * passed since last recovery attempt -- cgit v1.2.3 From ec2c64e202576295bdef8edd18c67428a378cc16 Mon Sep 17 00:00:00 2001 From: Marcus Folkesson Date: Tue, 27 Mar 2018 22:31:44 +0200 Subject: ath10k: sdio: fix memory leak for probe allocations These allocations are not freed upon release. When on it; go for managed resources instead. Signed-off-by: Marcus Folkesson [kvalo: fix two checkpatch warnings] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/sdio.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 03a69e5b1116..2d04c54a4153 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1957,25 +1957,25 @@ static int ath10k_sdio_probe(struct sdio_func *func, ar_sdio = ath10k_sdio_priv(ar); ar_sdio->irq_data.irq_proc_reg = - kzalloc(sizeof(struct ath10k_sdio_irq_proc_regs), - GFP_KERNEL); + devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs), + GFP_KERNEL); if (!ar_sdio->irq_data.irq_proc_reg) { ret = -ENOMEM; goto err_core_destroy; } ar_sdio->irq_data.irq_en_reg = - kzalloc(sizeof(struct ath10k_sdio_irq_enable_regs), - GFP_KERNEL); + devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs), + GFP_KERNEL); if (!ar_sdio->irq_data.irq_en_reg) { ret = -ENOMEM; - goto err_free_proc_reg; + goto err_core_destroy; } - ar_sdio->bmi_buf = kzalloc(BMI_MAX_CMDBUF_SIZE, GFP_KERNEL); + ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL); if (!ar_sdio->bmi_buf) { ret = -ENOMEM; - goto err_free_en_reg; + goto err_core_destroy; } ar_sdio->func = func; @@ -1995,7 +1995,7 @@ static int ath10k_sdio_probe(struct sdio_func *func, ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq"); if (!ar_sdio->workqueue) { ret = -ENOMEM; - goto err_free_bmi_buf; + goto err_core_destroy; } for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++) @@ -2011,7 +2011,7 @@ static int ath10k_sdio_probe(struct sdio_func *func, ret = -ENODEV; ath10k_err(ar, "unsupported device id %u (0x%x)\n", dev_id_base, id->device); - goto err_free_bmi_buf; + goto err_core_destroy; } ar->id.vendor = id->vendor; @@ -2040,12 +2040,6 @@ static int ath10k_sdio_probe(struct sdio_func *func, err_free_wq: 
destroy_workqueue(ar_sdio->workqueue); -err_free_bmi_buf: - kfree(ar_sdio->bmi_buf); -err_free_en_reg: - kfree(ar_sdio->irq_data.irq_en_reg); -err_free_proc_reg: - kfree(ar_sdio->irq_data.irq_proc_reg); err_core_destroy: ath10k_core_destroy(ar); -- cgit v1.2.3 From 6ce36faeac353813b0bfd3b9bc867f3e0d6a35b2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 29 Mar 2018 17:59:49 +0100 Subject: ath10k: fix spelling mistake: "tiggers" -> "triggers" Trivial fix to spelling mistake in message text Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 80a80830b363..eb1c8a1a1c88 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -7768,7 +7768,7 @@ ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev, len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "HW rate", pdev->data_rc); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Sched self tiggers", pdev->self_triggers); + "Sched self triggers", pdev->self_triggers); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Dropped due to SW retries", pdev->sw_retry_failure); -- cgit v1.2.3 From 9c27489a34548913baaaf3b2776e05d4a9389e3e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 30 Mar 2018 13:34:01 -0500 Subject: ath9k: dfs: remove accidental use of stack VLA In preparation to enabling -Wvla, remove accidental use of stack VLA. This avoids an accidental stack VLA (since the compiler thinks the value of FFT_NUM_SAMPLES can change, even when marked "const"). This just replaces it with a #define. Also, fixed as part of the directive to remove all VLAs from the kernel: https://lkml.org/lkml/2018/3/7/621 Signed-off-by: Gustavo A. R. Silva Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/dfs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c index 6fee9a464cce..c8844f55574c 100644 --- a/drivers/net/wireless/ath/ath9k/dfs.c +++ b/drivers/net/wireless/ath/ath9k/dfs.c @@ -41,7 +41,7 @@ static const int BIN_DELTA_MAX = 10; /* we need at least 3 deltas / 4 samples for a reliable chirp detection */ #define NUM_DIFFS 3 -static const int FFT_NUM_SAMPLES = (NUM_DIFFS + 1); +#define FFT_NUM_SAMPLES (NUM_DIFFS + 1) /* Threshold for difference of delta peaks */ static const int MAX_DIFF = 2; @@ -114,7 +114,7 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data, ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n", datalen, num_fft_packets); - if (num_fft_packets < (FFT_NUM_SAMPLES)) { + if (num_fft_packets < FFT_NUM_SAMPLES) { ath_dbg(common, DFS, "not enough packets for chirp\n"); return false; } @@ -136,7 +136,7 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data, return false; ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n", datalen, num_fft_packets); - if (num_fft_packets < (FFT_NUM_SAMPLES)) { + if (num_fft_packets < FFT_NUM_SAMPLES) { ath_dbg(common, DFS, "not enough packets for chirp\n"); return false; } -- cgit v1.2.3 From 7cae35199bee1cd661926f2d68ac46d07682fc54 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 3 Apr 2018 18:51:52 +0200 Subject: wcn36xx: check for DMA mapping errors in wcn36xx_dxe_tx_frame() Bail out if the mapping fails. 
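The underlying pattern is the standard streaming-DMA one: dma_map_single() can fail, and its result must be tested with dma_mapping_error() rather than compared against NULL. A minimal sketch of that pattern (simplified, with a made-up helper name, not the wcn36xx code itself):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int map_tx_buffer(struct device *dev, void *data, size_t len,
                             dma_addr_t *addr)
    {
            *addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);

            /* A failed mapping is not a NULL address; it has to be
             * detected with dma_mapping_error().
             */
            if (dma_mapping_error(dev, *addr))
                    return -ENOMEM;

            return 0;
    }
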
Even though this hasn't occured during tests, this unlikely case should still be handled. Signed-off-by: Daniel Mack Acked-by: Ramon Fried Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 2c3b899a88fa..405d350f8708 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -705,6 +705,11 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, ctl->skb->data, ctl->skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(wcn->dev, desc->src_addr_l)) { + dev_err(wcn->dev, "unable to DMA map src_addr_l\n"); + ret = -ENOMEM; + goto unlock; + } desc->dst_addr_l = ch->dxe_wq; desc->fr_len = ctl->skb->len; -- cgit v1.2.3 From 271f1e65ff38f18aa440fec17c759e70a6adfa4e Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 3 Apr 2018 18:51:53 +0200 Subject: wcn36xx: don't keep reference to skb if transmission failed When wcn36xx_dxe_tx_frame() fails to transmit the TX frame, the driver will call into ieee80211_free_txskb() for the skb in flight, so it'll no longer be valid. Hence, we shouldn't keep a reference to it in ctl->skb. Also, if the skb has IEEE80211_TX_CTL_REQ_TX_STATUS set, a pointer to it will currently remain in wcn->tx_ack_skb, which will potentially lead to a crash if accessed later. Fix this by checking the return value of wcn36xx_dxe_tx_frame(), and nullify wcn->tx_ack_skb again in case of errors. Move the assignment of ctl->skb in wcn36xx_dxe_tx_frame() so it only happens when the transmission is successful. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 6 +++--- drivers/net/wireless/ath/wcn36xx/txrx.c | 15 ++++++++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 405d350f8708..2cedb5a4f9e3 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -693,7 +693,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, /* Set source address of the SKB we send */ ctl = ctl->next; - ctl->skb = skb; desc = ctl->desc; if (ctl->bd_cpu_addr) { wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n"); @@ -702,8 +701,8 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, } desc->src_addr_l = dma_map_single(wcn->dev, - ctl->skb->data, - ctl->skb->len, + skb->data, + skb->len, DMA_TO_DEVICE); if (dma_mapping_error(wcn->dev, desc->src_addr_l)) { dev_err(wcn->dev, "unable to DMA map src_addr_l\n"); @@ -711,6 +710,7 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, goto unlock; } + ctl->skb = skb; desc->dst_addr_l = ch->dxe_wq; desc->fr_len = ctl->skb->len; diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index b1768ed6b0be..a6902371e89c 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -273,6 +273,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, bool bcast = is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1); struct wcn36xx_tx_bd bd; + int ret; memset(&bd, 0, sizeof(bd)); @@ -317,5 +318,17 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, buff_to_be((u32 *)&bd, sizeof(bd)/sizeof(u32)); bd.tx_bd_sign = 0xbdbdbdbd; - return wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low); + ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low); + if (ret && bd.tx_comp) { 
+ /* If the skb has not been transmitted, + * don't keep a reference to it. + */ + spin_lock_irqsave(&wcn->dxe_lock, flags); + wcn->tx_ack_skb = NULL; + spin_unlock_irqrestore(&wcn->dxe_lock, flags); + + ieee80211_wake_queues(wcn->hw); + } + + return ret; } -- cgit v1.2.3 From 2edfcf2b303cdd63fe0176c6771b47af2d655f8e Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 3 Apr 2018 18:51:54 +0200 Subject: wcn36xx: don't delete invalid bss indices The firmware code cannot cope with requests to remove BSS indices that have not previously been added. This primarily happens when the device is suspended and then resumed. ieee80211_reconfig() then calls into wcn36xx_bss_info_changed() with an empty bssid and BSS_CHANGED_BSSID set, which subsequently leads to a firmware crash: [ 43.647928] qcom-wcnss-pil a204000.wcnss: fatal error received: halMsg.c:4964:halMsg_DelBss: Invalid BSSIndex 0 [ 43.647959] remoteproc remoteproc0: crash detected in a204000.wcnss: type fatal error To fix this, set bss_index to WCN36XX_HAL_BSS_INVALID_IDX for all bss that have not been configured in the firmware, and don't call into the firmware with invalid indices. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 1 + drivers/net/wireless/ath/wcn36xx/smd.c | 6 ++++++ 2 files changed, 7 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 69d6be59d97f..32bbd6e2fd09 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -953,6 +953,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw, mutex_lock(&wcn->conf_mutex); + vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; list_add(&vif_priv->list, &wcn->vif_list); wcn36xx_smd_add_sta_self(wcn, vif); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 8932af5e4d8d..5be07e40a86d 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1446,6 +1446,10 @@ int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif) int ret = 0; mutex_lock(&wcn->hal_mutex); + + if (vif_priv->bss_index == WCN36XX_HAL_BSS_INVALID_IDX) + goto out; + INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ); msg_body.bss_index = vif_priv->bss_index; @@ -1464,6 +1468,8 @@ int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif) wcn36xx_err("hal_delete_bss response failed err=%d\n", ret); goto out; } + + vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; out: mutex_unlock(&wcn->hal_mutex); return ret; -- cgit v1.2.3 From 1391cce7daf65dce586aba78df13b0cc4e737416 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Thu, 5 Apr 2018 13:51:49 +0200 Subject: wcn36xx: Add missing fall through comment in smd.c This prevents GCC warning. 
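The warning in question comes from -Wimplicit-fallthrough; at the time, a /* fall through */ comment was the accepted way to mark an intentional fall-through in kernel code. A generic illustration of the idiom (not the driver code; case values and names are placeholders):

    /* Built with -Wimplicit-fallthrough, as in the kernel build. */
    static void handle_scan_event(int type, int *aborted, int *completed)
    {
            switch (type) {
            case 1:                         /* e.g. "scan failed" */
                    *aborted = 1;
                    /* fall through */      /* a failed scan is also a completed one */
            case 2:                         /* e.g. "scan completed" */
                    *completed = 1;
                    break;
            default:
                    break;
            }
    }
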
Reported-by: Dan Carpenter Signed-off-by: Loic Poulain Acked-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 5be07e40a86d..9827a1e1124b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -2143,6 +2143,7 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len) switch (rsp->type) { case WCN36XX_HAL_SCAN_IND_FAILED: scan_info.aborted = true; + /* fall through */ case WCN36XX_HAL_SCAN_IND_COMPLETED: mutex_lock(&wcn->scan_lock); wcn->scan_req = NULL; -- cgit v1.2.3 From 6062546d9b7fecd93b9ad5ed6a34d4741cf3c51a Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Fri, 6 Apr 2018 12:11:28 +0200 Subject: wcn36xx: Remove useless skb spinlock Each DXE control block is associated to a specific channel. The channel lock is always taken before accessing a control block. There is no need to have an extra (useless) spinlock for the control block skb. Signed-off-by: Loic Poulain Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 8 +------- drivers/net/wireless/ath/wcn36xx/dxe.h | 1 - 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 2cedb5a4f9e3..a1541a8f5212 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -78,7 +78,6 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch) if (!cur_ctl) goto out_fail; - spin_lock_init(&cur_ctl->skb_lock); cur_ctl->ctl_blk_order = i; if (i == 0) { ch->head_blk_ctl = cur_ctl; @@ -377,12 +376,11 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) /* Keep frame until TX status comes */ ieee80211_free_txskb(wcn->hw, ctl->skb); } - spin_lock(&ctl->skb_lock); + if (wcn->queues_stopped) { wcn->queues_stopped = false; ieee80211_wake_queues(wcn->hw); } - spin_unlock(&ctl->skb_lock); ctl->skb = NULL; } @@ -654,8 +652,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, spin_lock_irqsave(&ch->lock, flags); ctl = ch->head_blk_ctl; - spin_lock(&ctl->next->skb_lock); - /* * If skb is not null that means that we reached the tail of the ring * hence ring is full. Stop queues to let mac80211 back off until ring @@ -664,11 +660,9 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, if (NULL != ctl->next->skb) { ieee80211_stop_queues(wcn->hw); wcn->queues_stopped = true; - spin_unlock(&ctl->next->skb_lock); spin_unlock_irqrestore(&ch->lock, flags); return -EBUSY; } - spin_unlock(&ctl->next->skb_lock); ctl->skb = NULL; desc = ctl->desc; diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h index ce580960d109..31b81b7547a3 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.h +++ b/drivers/net/wireless/ath/wcn36xx/dxe.h @@ -422,7 +422,6 @@ struct wcn36xx_dxe_ctl { unsigned int desc_phy_addr; int ctl_blk_order; struct sk_buff *skb; - spinlock_t skb_lock; void *bd_cpu_addr; dma_addr_t bd_phy_addr; }; -- cgit v1.2.3 From 5151a673da43fbaeb62fcbd20814a3cdd4dd3afd Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 10 Apr 2018 17:52:45 +0300 Subject: wcn36xx: allocate skbs with GFP_KERNEL during init GFP_ATOMIC should only be used when the allocation is done from atomic context. 
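Passing the allocation context down as a gfp_t argument is the usual way to let one helper serve both process context and atomic context. A reduced sketch of that approach (the helper and ring layout are illustrative only, not the wcn36xx implementation):

    #include <linux/skbuff.h>
    #include <linux/errno.h>

    static struct sk_buff *rx_fill_one(unsigned int size, gfp_t gfp)
    {
            return alloc_skb(size, gfp);
    }

    /* Init path: process context, so sleeping allocations are allowed. */
    static int rx_ring_init(struct sk_buff **ring, unsigned int n,
                            unsigned int size)
    {
            unsigned int i;

            for (i = 0; i < n; i++) {
                    ring[i] = rx_fill_one(size, GFP_KERNEL);
                    if (!ring[i])
                            return -ENOMEM;
            }
            return 0;
    }

    /* The interrupt/NAPI refill path calls the same helper with
     * GFP_ATOMIC, since sleeping there is not allowed.
     */
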
Introduce a new flag to wcn36xx_dxe_fill_skb() and use GFP_KERNEL when pre-allocating buffers during init. This doesn't fix an issue that was observed in the wild, but it reduces the chance of failed allocations under memory pressure. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index a1541a8f5212..a6702ada13d3 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -274,12 +274,14 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) return 0; } -static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl) +static int wcn36xx_dxe_fill_skb(struct device *dev, + struct wcn36xx_dxe_ctl *ctl, + gfp_t gfp) { struct wcn36xx_dxe_desc *dxe = ctl->desc; struct sk_buff *skb; - skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC); + skb = alloc_skb(WCN36XX_PKT_SIZE, gfp); if (skb == NULL) return -ENOMEM; @@ -306,7 +308,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn, cur_ctl = wcn_ch->head_blk_ctl; for (i = 0; i < wcn_ch->desc_num; i++) { - wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl); + wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL); cur_ctl = cur_ctl->next; } @@ -531,7 +533,7 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, while (!(dxe->ctrl & WCN36xx_DXE_CTRL_VLD)) { skb = ctl->skb; dma_addr = dxe->dst_addr_l; - ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl); + ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC); if (0 == ret) { /* new skb allocation ok. Use the new one and queue * the old one to network system. -- cgit v1.2.3 From eda7d46da443e154cbc8f7c1d68acc5088d38181 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Mon, 16 Apr 2018 10:33:41 +0800 Subject: net: mediatek: use of_device_get_match_data() The usage of of_device_get_match_data() reduce the code size a bit. Also, the only way to call mtk_probe() is to match an entry in of_mtk_match[], so match cannot be NULL. Signed-off-by: Ryder Lee Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index e0b72bf428f7..d8ebf0a05e0c 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2503,7 +2503,6 @@ static int mtk_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct device_node *mac_np; - const struct of_device_id *match; struct mtk_eth *eth; int err; int i; @@ -2512,8 +2511,7 @@ static int mtk_probe(struct platform_device *pdev) if (!eth) return -ENOMEM; - match = of_match_device(of_mtk_match, &pdev->dev); - eth->soc = (struct mtk_soc_data *)match->data; + eth->soc = of_device_get_match_data(&pdev->dev); eth->dev = &pdev->dev; eth->base = devm_ioremap_resource(&pdev->dev, res); -- cgit v1.2.3 From c009f413b79de526a355b6eefa4f900b6c45d5f4 Mon Sep 17 00:00:00 2001 From: Jassi Brar Date: Mon, 16 Apr 2018 12:52:16 +0530 Subject: net: netsec: enable tx-irq during open callback Enable TX-irq as well during ndo_open() as we can not count upon RX to arrive early enough to trigger the napi. This patch is critical for installation over network. 
Fixes: 533dd11a12f6 ("net: socionext: Add Synquacer NetSec driver") Signed-off-by: Jassi Brar Signed-off-by: David S. Miller --- drivers/net/ethernet/socionext/netsec.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index f4c0b02ddad8..f6fe70edbbfe 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1313,8 +1313,8 @@ static int netsec_netdev_open(struct net_device *ndev) napi_enable(&priv->napi); netif_start_queue(ndev); - /* Enable RX intr. */ - netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX); + /* Enable TX+RX intr. */ + netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX); return 0; err3: -- cgit v1.2.3 From 9a00b697ce31e38c670a3042cf9f1e9cf28dabb5 Mon Sep 17 00:00:00 2001 From: Masahisa KOJIMA Date: Mon, 16 Apr 2018 13:09:59 +0530 Subject: net: socionext: reset hardware in ndo_stop When the interface is down, head/tail of the descriptor ring address is set to 0 in netsec_netdev_stop(). But netsec hardware still keeps the previous descriptor ring address, so there is inconsistency between driver and hardware after interface is up at a later time. To address this inconsistency, add netsec_reset_hardware() when the interface is down. In addition, to minimize the reset process, add flag to decide whether driver loads the netsec microcode. Even if driver resets the netsec hardware, netsec microcode keeps resident on RAM, so it is ok we only load the microcode at initialization. This patch is critical for installation over network. Signed-off-by: Masahisa KOJIMA Fixes: 533dd11a12f6 ("net: socionext: Add Synquacer NetSec driver") Signed-off-by: Jassi Brar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/socionext/netsec.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index f6fe70edbbfe..aa50331b7607 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1057,7 +1057,8 @@ static int netsec_netdev_load_microcode(struct netsec_priv *priv) return 0; } -static int netsec_reset_hardware(struct netsec_priv *priv) +static int netsec_reset_hardware(struct netsec_priv *priv, + bool load_ucode) { u32 value; int err; @@ -1102,11 +1103,14 @@ static int netsec_reset_hardware(struct netsec_priv *priv) netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG, 1 << NETSEC_REG_DESC_ENDIAN); - err = netsec_netdev_load_microcode(priv); - if (err) { - netif_err(priv, probe, priv->ndev, - "%s: failed to load microcode (%d)\n", __func__, err); - return err; + if (load_ucode) { + err = netsec_netdev_load_microcode(priv); + if (err) { + netif_err(priv, probe, priv->ndev, + "%s: failed to load microcode (%d)\n", + __func__, err); + return err; + } } /* start DMA engines */ @@ -1328,6 +1332,7 @@ err1: static int netsec_netdev_stop(struct net_device *ndev) { + int ret; struct netsec_priv *priv = netdev_priv(ndev); netif_stop_queue(priv->ndev); @@ -1343,12 +1348,14 @@ static int netsec_netdev_stop(struct net_device *ndev) netsec_uninit_pkt_dring(priv, NETSEC_RING_TX); netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); + ret = netsec_reset_hardware(priv, false); + phy_stop(ndev->phydev); phy_disconnect(ndev->phydev); pm_runtime_put_sync(priv->dev); - return 0; + return ret; } static int netsec_netdev_init(struct net_device *ndev) @@ -1364,7 +1371,7 @@ static int netsec_netdev_init(struct net_device *ndev) if (ret) goto err1; - ret = netsec_reset_hardware(priv); + ret = netsec_reset_hardware(priv, true); if (ret) goto err2; -- cgit v1.2.3 From 9f463608b98331f58e37d175830b53cec01859a4 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 9 Apr 2018 13:43:36 +0100 Subject: net/mlx5: remove some extraneous spaces in indentations There are several lines where there is an extraneous space causing indentation misalignment. Remove them. 
Cleans up Cocconelle warnings: ./drivers/net/ethernet/mellanox/mlx5/core/qp.c:409:3-18: code aligned with following code on line 410 ./drivers/net/ethernet/mellanox/mlx5/core/qp.c:415:3-18: code aligned with following code on line 416 ./drivers/net/ethernet/mellanox/mlx5/core/qp.c:421:3-18: code aligned with following code on line 422 Signed-off-by: Colin Ian King Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 02d6c5b5d502..4ca07bfb6b14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -407,21 +407,21 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, case MLX5_CMD_OP_RST2INIT_QP: if (MBOX_ALLOC(mbox, rst2init_qp)) return -ENOMEM; - MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); - break; + MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; case MLX5_CMD_OP_INIT2RTR_QP: if (MBOX_ALLOC(mbox, init2rtr_qp)) return -ENOMEM; - MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); - break; + MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; case MLX5_CMD_OP_RTR2RTS_QP: if (MBOX_ALLOC(mbox, rtr2rts_qp)) return -ENOMEM; - MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); - break; + MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; case MLX5_CMD_OP_RTS2RTS_QP: if (MBOX_ALLOC(mbox, rts2rts_qp)) return -ENOMEM; -- cgit v1.2.3 From 42de047d60bc5d87e369c36115058b9dacc5683c Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Mon, 16 Apr 2018 16:08:12 +0100 Subject: net: stmmac: Switch stmmac_desc_ops to generic HW Interface Helpers Switch stmmac_desc_ops to generic Hardware Interface Helpers instead of using hard-coded callbacks. This makes the code more readable and more flexible. No functional change. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 14 +-- drivers/net/ethernet/stmicro/stmmac/common.h | 55 +-------- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 4 +- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 4 +- drivers/net/ethernet/stmicro/stmmac/hwif.h | 125 +++++++++++++++++++++ drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 4 +- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 15 +-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 110 +++++++++--------- 8 files changed, 195 insertions(+), 136 deletions(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/hwif.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index e93c40b4631e..ca0f9c205f9d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -51,8 +51,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].buf = des2; tx_q->tx_skbuff_dma[entry].len = bmax; /* do not close the descriptor and do not set own bit */ - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, - 0, false, skb->len); + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE, + 0, false, skb->len); while (len != 0) { tx_q->tx_skbuff[entry] = NULL; @@ -68,9 +68,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return -1; tx_q->tx_skbuff_dma[entry].buf = des2; tx_q->tx_skbuff_dma[entry].len = bmax; - priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, - STMMAC_CHAIN_MODE, 1, - false, skb->len); + stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum, + STMMAC_CHAIN_MODE, 1, false, skb->len); len -= bmax; i++; } else { @@ -83,9 +82,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].buf = des2; tx_q->tx_skbuff_dma[entry].len = len; /* last descriptor can be set now */ - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, - STMMAC_CHAIN_MODE, 1, - true, skb->len); + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, + STMMAC_CHAIN_MODE, 1, true, skb->len); len = 0; } } diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index ad2388aee463..2c50d8c0a557 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -32,6 +32,7 @@ #endif #include "descs.h" +#include "hwif.h" #include "mmc.h" /* Synopsys Core versions */ @@ -377,60 +378,6 @@ struct dma_features { #define JUMBO_LEN 9000 -/* Descriptors helpers */ -struct stmmac_desc_ops { - /* DMA RX descriptor ring initialization */ - void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode, - int end); - /* DMA TX descriptor ring initialization */ - void (*init_tx_desc) (struct dma_desc *p, int mode, int end); - - /* Invoked by the xmit function to prepare the tx descriptor */ - void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, - bool csum_flag, int mode, bool tx_own, - bool ls, unsigned int tot_pkt_len); - void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, - int len2, bool tx_own, bool ls, - unsigned int tcphdrlen, - unsigned int tcppayloadlen); - /* Set/get the owner of the descriptor */ - void (*set_tx_owner) (struct dma_desc *p); - int (*get_tx_owner) (struct dma_desc *p); - /* Clean the tx descriptor as soon as the tx irq is received */ - void (*release_tx_desc) (struct dma_desc *p, int mode); - /* Clear interrupt on tx frame 
completion. When this bit is - * set an interrupt happens as soon as the frame is transmitted */ - void (*set_tx_ic)(struct dma_desc *p); - /* Last tx segment reports the transmit status */ - int (*get_tx_ls) (struct dma_desc *p); - /* Return the transmit status looking at the TDES1 */ - int (*tx_status) (void *data, struct stmmac_extra_stats *x, - struct dma_desc *p, void __iomem *ioaddr); - /* Get the buffer size from the descriptor */ - int (*get_tx_len) (struct dma_desc *p); - /* Handle extra events on specific interrupts hw dependent */ - void (*set_rx_owner) (struct dma_desc *p); - /* Get the receive frame size */ - int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type); - /* Return the reception status looking at the RDES1 */ - int (*rx_status) (void *data, struct stmmac_extra_stats *x, - struct dma_desc *p); - void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x, - struct dma_extended_desc *p); - /* Set tx timestamp enable bit */ - void (*enable_tx_timestamp) (struct dma_desc *p); - /* get tx timestamp status */ - int (*get_tx_timestamp_status) (struct dma_desc *p); - /* get timestamp value */ - u64(*get_timestamp) (void *desc, u32 ats); - /* get rx timestamp status */ - int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); - /* Display ring */ - void (*display_ring)(void *head, unsigned int size, bool rx); - /* set MSS via context descriptor */ - void (*set_mss)(struct dma_desc *p, unsigned int mss); -}; - extern const struct stmmac_desc_ops enh_desc_ops; extern const struct stmmac_desc_ops ndesc_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 2a6521d33e43..65ed896c13cb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -223,7 +223,7 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) return 0; } -static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) +static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) { struct dma_desc *p = (struct dma_desc *)desc; u64 ns; @@ -232,7 +232,7 @@ static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) /* convert high/sec time stamp value to nanosecond */ ns += le32_to_cpu(p->des1) * 1000000000ULL; - return ns; + *ts = ns; } static int dwmac4_rx_check_timestamp(void *desc) diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 6768a25b6aa0..3bfb3f584be2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -382,7 +382,7 @@ static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17; } -static u64 enh_desc_get_timestamp(void *desc, u32 ats) +static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts) { u64 ns; @@ -397,7 +397,7 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats) ns += le32_to_cpu(p->des3) * 1000000000ULL; } - return ns; + *ts = ns; } static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h new file mode 100644 index 000000000000..4994677f66cd --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. 
+// stmmac HW Interface Callbacks + +#ifndef __STMMAC_HWIF_H__ +#define __STMMAC_HWIF_H__ + +#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \ +({ \ + int __result = -EINVAL; \ + if ((__priv)->hw->__module->__cname) { \ + (__priv)->hw->__module->__cname((__arg0), ##__args); \ + __result = 0; \ + } \ + __result; \ +}) +#define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) \ +({ \ + int __result = -EINVAL; \ + if ((__priv)->hw->__module->__cname) \ + __result = (__priv)->hw->__module->__cname((__arg0), ##__args); \ + __result; \ +}) + +struct stmmac_extra_stats; +struct stmmac_safety_stats; +struct dma_desc; +struct dma_extended_desc; + +/* Descriptors helpers */ +struct stmmac_desc_ops { + /* DMA RX descriptor ring initialization */ + void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, + int end); + /* DMA TX descriptor ring initialization */ + void (*init_tx_desc)(struct dma_desc *p, int mode, int end); + /* Invoked by the xmit function to prepare the tx descriptor */ + void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, bool ls, + unsigned int tot_pkt_len); + void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, + int len2, bool tx_own, bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen); + /* Set/get the owner of the descriptor */ + void (*set_tx_owner)(struct dma_desc *p); + int (*get_tx_owner)(struct dma_desc *p); + /* Clean the tx descriptor as soon as the tx irq is received */ + void (*release_tx_desc)(struct dma_desc *p, int mode); + /* Clear interrupt on tx frame completion. When this bit is + * set an interrupt happens as soon as the frame is transmitted */ + void (*set_tx_ic)(struct dma_desc *p); + /* Last tx segment reports the transmit status */ + int (*get_tx_ls)(struct dma_desc *p); + /* Return the transmit status looking at the TDES1 */ + int (*tx_status)(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr); + /* Get the buffer size from the descriptor */ + int (*get_tx_len)(struct dma_desc *p); + /* Handle extra events on specific interrupts hw dependent */ + void (*set_rx_owner)(struct dma_desc *p); + /* Get the receive frame size */ + int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type); + /* Return the reception status looking at the RDES1 */ + int (*rx_status)(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p); + void (*rx_extended_status)(void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p); + /* Set tx timestamp enable bit */ + void (*enable_tx_timestamp) (struct dma_desc *p); + /* get tx timestamp status */ + int (*get_tx_timestamp_status) (struct dma_desc *p); + /* get timestamp value */ + void (*get_timestamp)(void *desc, u32 ats, u64 *ts); + /* get rx timestamp status */ + int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); + /* Display ring */ + void (*display_ring)(void *head, unsigned int size, bool rx); + /* set MSS via context descriptor */ + void (*set_mss)(struct dma_desc *p, unsigned int mss); +}; + +#define stmmac_init_rx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, init_rx_desc, __args) +#define stmmac_init_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, init_tx_desc, __args) +#define stmmac_prepare_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, prepare_tx_desc, __args) +#define stmmac_prepare_tso_tx_desc(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, desc, prepare_tso_tx_desc, __args) +#define stmmac_set_tx_owner(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_tx_owner, __args) +#define stmmac_get_tx_owner(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_owner, __args) +#define stmmac_release_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, release_tx_desc, __args) +#define stmmac_set_tx_ic(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_tx_ic, __args) +#define stmmac_get_tx_ls(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_ls, __args) +#define stmmac_tx_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, tx_status, __args) +#define stmmac_get_tx_len(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_len, __args) +#define stmmac_set_rx_owner(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_rx_owner, __args) +#define stmmac_get_rx_frame_len(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_frame_len, __args) +#define stmmac_rx_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, rx_status, __args) +#define stmmac_rx_extended_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, rx_extended_status, __args) +#define stmmac_enable_tx_timestamp(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, enable_tx_timestamp, __args) +#define stmmac_get_tx_timestamp_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_timestamp_status, __args) +#define stmmac_get_timestamp(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, get_timestamp, __args) +#define stmmac_get_rx_timestamp_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_timestamp_status, __args) +#define stmmac_display_ring(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, display_ring, __args) +#define stmmac_set_mss(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, desc, set_mss, __args) + +#endif /* __STMMAC_HWIF_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index ebd9e5e00f16..7b1d901bf5bc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -253,7 +253,7 @@ static int ndesc_get_tx_timestamp_status(struct dma_desc *p) return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17; } -static u64 ndesc_get_timestamp(void *desc, u32 ats) +static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts) { struct dma_desc *p = (struct dma_desc *)desc; u64 ns; @@ -262,7 +262,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats) /* convert high/sec time stamp value to nanosecond */ ns += le32_to_cpu(p->des3) * 1000000000ULL; - return ns; + *ts = ns; } static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats) diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 28e4b5d50ce6..808ca758d0ae 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -58,9 +58,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, - STMMAC_RING_MODE, 0, - false, skb->len); + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, + STMMAC_RING_MODE, 0, false, skb->len); tx_q->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); @@ -79,9 +78,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, - STMMAC_RING_MODE, 1, - true, skb->len); + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, + STMMAC_RING_MODE, 1, true, skb->len); } else { des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); @@ -92,9 +90,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].len = nopaged_len; tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); - priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, - STMMAC_RING_MODE, 0, - true, skb->len); + stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, + STMMAC_RING_MODE, 0, true, skb->len); } tx_q->cur_tx = entry; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9a16931ce39d..c44e19d5dbf2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -50,6 +50,7 @@ #include #include #include "dwmac1000.h" +#include "hwif.h" #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) @@ -464,9 +465,9 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, return; /* check tx tstamp status */ - if (priv->hw->desc->get_tx_timestamp_status(p)) { + if (stmmac_get_tx_timestamp_status(priv, p)) { /* get the valid tstamp */ - ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); + stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); @@ -502,8 +503,8 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, desc = np; /* Check if 
timestamp is available */ - if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) { - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); + if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { + stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); @@ -1008,7 +1009,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) head_rx = (void *)rx_q->dma_rx; /* Display RX ring */ - priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); + stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true); } } @@ -1029,7 +1030,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) else head_tx = (void *)tx_q->dma_tx; - priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); + stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false); } } @@ -1073,13 +1074,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) /* Clear the RX descriptors */ for (i = 0; i < DMA_RX_SIZE; i++) if (priv->extend_desc) - priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic, - priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, + priv->use_riwt, priv->mode, + (i == DMA_RX_SIZE - 1)); else - priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i], - priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], + priv->use_riwt, priv->mode, + (i == DMA_RX_SIZE - 1)); } /** @@ -1097,13 +1098,11 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) /* Clear the TX descriptors */ for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) - priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, - priv->mode, - (i == DMA_TX_SIZE - 1)); + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, + priv->mode, (i == DMA_TX_SIZE - 1)); else - priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], - priv->mode, - (i == DMA_TX_SIZE - 1)); + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], + priv->mode, (i == DMA_TX_SIZE - 1)); } /** @@ -1851,9 +1850,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) else p = tx_q->dma_tx + entry; - status = priv->hw->desc->tx_status(&priv->dev->stats, - &priv->xstats, p, - priv->ioaddr); + status = stmmac_tx_status(priv, &priv->dev->stats, + &priv->xstats, p, priv->ioaddr); /* Check if the descriptor is owned by the DMA */ if (unlikely(status & tx_dma_own)) break; @@ -1904,7 +1902,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) tx_q->tx_skbuff[entry] = NULL; } - priv->hw->desc->release_tx_desc(p, priv->mode); + stmmac_release_tx_desc(priv, p, priv->mode); entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); } @@ -1957,13 +1955,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) dma_free_tx_skbufs(priv, chan); for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) - priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, - priv->mode, - (i == DMA_TX_SIZE - 1)); + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, + priv->mode, (i == DMA_TX_SIZE - 1)); else - priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], - priv->mode, - (i == DMA_TX_SIZE - 1)); + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], + priv->mode, (i == DMA_TX_SIZE - 1)); tx_q->dirty_tx = 0; tx_q->cur_tx = 0; tx_q->mss = 0; @@ -2851,10 +2847,10 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 
TSO_MAX_BUFF_SIZE : tmp_len; - priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, - 0, 1, - (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), - 0, 0); + stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, + 0, 1, + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), + 0, 0); tmp_len -= TSO_MAX_BUFF_SIZE; } @@ -2926,7 +2922,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* set new MSS value if needed */ if (mss != tx_q->mss) { mss_desc = tx_q->dma_tx + tx_q->cur_tx; - priv->hw->desc->set_mss(mss_desc, mss); + stmmac_set_mss(priv, mss_desc, mss); tx_q->mss = mss; tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); @@ -3012,7 +3008,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) STMMAC_COAL_TIMER(priv->tx_coal_timer)); } else { priv->tx_count_frames = 0; - priv->hw->desc->set_tx_ic(desc); + stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; } @@ -3022,11 +3018,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) priv->hwts_tx_en)) { /* declare that device is doing timestamping */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - priv->hw->desc->enable_tx_timestamp(first); + stmmac_enable_tx_timestamp(priv, first); } /* Complete the first descriptor before granting the DMA */ - priv->hw->desc->prepare_tso_tx_desc(first, 1, + stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, pay_len, 1, tx_q->tx_skbuff_dma[first_entry].last_segment, @@ -3040,7 +3036,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) * sure that MSS's own bit is the last thing written. */ dma_wmb(); - priv->hw->desc->set_tx_owner(mss_desc); + stmmac_set_tx_owner(priv, mss_desc); } /* The own bit must be the latest setting done when prepare the @@ -3054,8 +3050,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, tx_q->cur_tx, first, nfrags); - priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE, - 0); + stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); pr_info(">>> frame to be transmitted: "); print_pkt(skb->data, skb_headlen(skb)); @@ -3174,9 +3169,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[entry].last_segment = last_segment; /* Prepare the descriptor and set the own bit too */ - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, - priv->mode, 1, last_segment, - skb->len); + stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, + priv->mode, 1, last_segment, skb->len); } /* Only the last descriptor gets to point to the skb. 
*/ @@ -3203,7 +3197,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) else tx_head = (void *)tx_q->dma_tx; - priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); + stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false); netdev_dbg(priv->dev, ">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); @@ -3228,7 +3222,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) STMMAC_COAL_TIMER(priv->tx_coal_timer)); } else { priv->tx_count_frames = 0; - priv->hw->desc->set_tx_ic(desc); + stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; } @@ -3259,13 +3253,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->hwts_tx_en)) { /* declare that device is doing timestamping */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - priv->hw->desc->enable_tx_timestamp(first); + stmmac_enable_tx_timestamp(priv, first); } /* Prepare the first descriptor setting the OWN bit too */ - priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, - csum_insertion, priv->mode, 1, - last_segment, skb->len); + stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, + csum_insertion, priv->mode, 1, last_segment, + skb->len); /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that @@ -3382,9 +3376,9 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) dma_wmb(); if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0); + stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0); else - priv->hw->desc->set_rx_owner(p); + stmmac_set_rx_owner(priv, p); dma_wmb(); @@ -3418,7 +3412,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) else rx_head = (void *)rx_q->dma_rx; - priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); + stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); } while (count < limit) { int status; @@ -3431,8 +3425,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) p = rx_q->dma_rx + entry; /* read the status of the incoming frame */ - status = priv->hw->desc->rx_status(&priv->dev->stats, - &priv->xstats, p); + status = stmmac_rx_status(priv, &priv->dev->stats, + &priv->xstats, p); /* check if managed by the DMA otherwise go ahead */ if (unlikely(status & dma_own)) break; @@ -3449,11 +3443,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) prefetch(np); - if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) - priv->hw->desc->rx_extended_status(&priv->dev->stats, - &priv->xstats, - rx_q->dma_erx + - entry); + if (priv->extend_desc) + stmmac_rx_extended_status(priv, &priv->dev->stats, + &priv->xstats, rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; if (priv->hwts_rx_en && !priv->extend_desc) { @@ -3479,7 +3471,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) else des = le32_to_cpu(p->des2); - frame_len = priv->hw->desc->get_rx_frame_len(p, coe); + frame_len = stmmac_get_rx_frame_len(priv, p, coe); /* If frame length is greater than skb buffer size * (preallocated during init) then the packet is -- cgit v1.2.3 From a4e887fa6dcc4e8ca4ba93bca3f40b5ab366edf2 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Mon, 16 Apr 2018 16:08:13 +0100 Subject: net: stmmac: Switch stmmac_dma_ops to generic HW Interface Helpers Switch stmmac_dma_ops to generic Hardware Interface Helpers instead of using hard-coded callbacks. 
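The hwif.h helpers follow a simple pattern: an ops table of optional callbacks plus a wrapper macro that checks the function pointer and falls back to -EINVAL when the callback is absent. A reduced model of the idea (hypothetical names, not the stmmac code itself):

    #include <linux/errno.h>

    struct demo_ops {
            void (*start_tx)(void *ioaddr, unsigned int chan);
    };

    struct demo_priv {
            const struct demo_ops *ops;
            void *ioaddr;
    };

    /* Mirrors stmmac_do_void_callback(): run the callback when it is
     * implemented, otherwise report -EINVAL.
     */
    #define demo_do_void_callback(__priv, __cname, __args...)       \
    ({                                                              \
            int __result = -EINVAL;                                 \
            if ((__priv)->ops->__cname) {                           \
                    (__priv)->ops->__cname(__args);                 \
                    __result = 0;                                   \
            }                                                       \
            __result;                                               \
    })

    #define demo_start_tx(__priv, __args...)        \
            demo_do_void_callback(__priv, start_tx, __args)

    /* A call site then reads demo_start_tx(priv, priv->ioaddr, chan)
     * instead of an open-coded NULL check on priv->ops->start_tx.
     */
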
This makes the code more readable and more flexible. No functional change. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 50 ------- drivers/net/ethernet/stmicro/stmmac/hwif.h | 106 +++++++++++++++ .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 14 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 147 +++++++++------------ 4 files changed, 172 insertions(+), 145 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 2c50d8c0a557..b27221bd358c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -381,56 +381,6 @@ struct dma_features { extern const struct stmmac_desc_ops enh_desc_ops; extern const struct stmmac_desc_ops ndesc_ops; -/* Specific DMA helpers */ -struct stmmac_dma_ops { - /* DMA core initialization */ - int (*reset)(void __iomem *ioaddr); - void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds); - void (*init_chan)(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, u32 chan); - void (*init_rx_chan)(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_rx_phy, u32 chan); - void (*init_tx_chan)(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx_phy, u32 chan); - /* Configure the AXI Bus Mode Register */ - void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); - /* Dump DMA registers */ - void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); - /* Set tx/rx threshold in the csr6 register - * An invalid value enables the store-and-forward mode */ - void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, - int rxfifosz); - void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, - int fifosz, u8 qmode); - void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, - int fifosz, u8 qmode); - /* To track extra statistic (if supported) */ - void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, - void __iomem *ioaddr); - void (*enable_dma_transmission) (void __iomem *ioaddr); - void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan); - void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan); - void (*start_tx)(void __iomem *ioaddr, u32 chan); - void (*stop_tx)(void __iomem *ioaddr, u32 chan); - void (*start_rx)(void __iomem *ioaddr, u32 chan); - void (*stop_rx)(void __iomem *ioaddr, u32 chan); - int (*dma_interrupt) (void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan); - /* If supported then get the optional core features */ - void (*get_hw_feature)(void __iomem *ioaddr, - struct dma_features *dma_cap); - /* Program the HW RX Watchdog */ - void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); - void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); - void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); - void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); - void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); - void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); -}; - struct mac_device_info; /* Helpers to program the MAC core */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 4994677f66cd..e1a9ae63f859 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -122,4 
+122,110 @@ struct stmmac_desc_ops { #define stmmac_set_mss(__priv, __args...) \ stmmac_do_void_callback(__priv, desc, set_mss, __args) +struct stmmac_dma_cfg; +struct dma_features; + +/* Specific DMA helpers */ +struct stmmac_dma_ops { + /* DMA core initialization */ + int (*reset)(void __iomem *ioaddr); + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx, u32 dma_rx, int atds); + void (*init_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan); + void (*init_rx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan); + void (*init_tx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan); + /* Configure the AXI Bus Mode Register */ + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); + /* Dump DMA registers */ + void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); + /* Set tx/rx threshold in the csr6 register + * An invalid value enables the store-and-forward mode */ + void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, + int rxfifosz); + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz, u8 qmode); + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz, u8 qmode); + /* To track extra statistic (if supported) */ + void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, + void __iomem *ioaddr); + void (*enable_dma_transmission) (void __iomem *ioaddr); + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan); + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan); + void (*start_tx)(void __iomem *ioaddr, u32 chan); + void (*stop_tx)(void __iomem *ioaddr, u32 chan); + void (*start_rx)(void __iomem *ioaddr, u32 chan); + void (*stop_rx)(void __iomem *ioaddr, u32 chan); + int (*dma_interrupt) (void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan); + /* If supported then get the optional core features */ + void (*get_hw_feature)(void __iomem *ioaddr, + struct dma_features *dma_cap); + /* Program the HW RX Watchdog */ + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); +}; + +#define stmmac_reset(__priv, __args...) \ + stmmac_do_callback(__priv, dma, reset, __args) +#define stmmac_dma_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init, __args) +#define stmmac_init_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_chan, __args) +#define stmmac_init_rx_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_rx_chan, __args) +#define stmmac_init_tx_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_tx_chan, __args) +#define stmmac_axi(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, axi, __args) +#define stmmac_dump_dma_regs(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dump_regs, __args) +#define stmmac_dma_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_mode, __args) +#define stmmac_dma_rx_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args) +#define stmmac_dma_tx_mode(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, dma, dma_tx_mode, __args) +#define stmmac_dma_diagnostic_fr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args) +#define stmmac_enable_dma_transmission(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args) +#define stmmac_enable_dma_irq(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_dma_irq, __args) +#define stmmac_disable_dma_irq(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, disable_dma_irq, __args) +#define stmmac_start_tx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, start_tx, __args) +#define stmmac_stop_tx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, stop_tx, __args) +#define stmmac_start_rx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, start_rx, __args) +#define stmmac_stop_rx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, stop_rx, __args) +#define stmmac_dma_interrupt_status(__priv, __args...) \ + stmmac_do_callback(__priv, dma, dma_interrupt, __args) +#define stmmac_get_hw_feature(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, get_hw_feature, __args) +#define stmmac_rx_watchdog(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, rx_watchdog, __args) +#define stmmac_set_tx_ring_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __args) +#define stmmac_set_rx_ring_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __args) +#define stmmac_set_rx_tail_ptr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __args) +#define stmmac_set_tx_tail_ptr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) +#define stmmac_enable_tso(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, dma, enable_tso, __args) + #endif /* __STMMAC_HWIF_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 2c6ed47704fc..7009718dc7a6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -443,7 +443,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev, memset(reg_space, 0x0, REG_SPACE_SIZE); priv->hw->mac->dump_regs(priv->hw, reg_space); - priv->hw->dma->dump_regs(priv->ioaddr, reg_space); + stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); /* Copy DMA registers to where ethtool expects them */ memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], NUM_DWMAC1000_DMA_REGS * 4); @@ -529,7 +529,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 tx_queues_count = priv->plat->tx_queues_to_use; unsigned long count; - int i, j = 0; + int i, j = 0, ret; if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { dump = priv->hw->mac->safety_feat_dump; @@ -541,11 +541,9 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, } /* Update the DMA HW counters for dwmac10/100 */ - if (priv->hw->dma->dma_diagnostic_fr) - priv->hw->dma->dma_diagnostic_fr(&dev->stats, - (void *) &priv->xstats, - priv->ioaddr); - else { + ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats, + priv->ioaddr); + if (ret) { /* If supported, for new GMAC chips expose the MMC counters */ if (priv->dma_cap.rmon) { dwmac_mmc_read(priv->mmcaddr, &priv->mmc); @@ -810,7 +808,7 @@ static int stmmac_set_coalesce(struct net_device *dev, priv->tx_coal_frames = ec->tx_max_coalesced_frames; priv->tx_coal_timer = ec->tx_coalesce_usecs; priv->rx_riwt = rx_riwt; - priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt); + stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c44e19d5dbf2..7f2aeaede682 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1677,7 +1677,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); - priv->hw->dma->start_rx(priv->ioaddr, chan); + stmmac_start_rx(priv, priv->ioaddr, chan); } /** @@ -1690,7 +1690,7 @@ static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); - priv->hw->dma->start_tx(priv->ioaddr, chan); + stmmac_start_tx(priv, priv->ioaddr, chan); } /** @@ -1703,7 +1703,7 @@ static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); - priv->hw->dma->stop_rx(priv->ioaddr, chan); + stmmac_stop_rx(priv, priv->ioaddr, chan); } /** @@ -1716,7 +1716,7 @@ static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); - priv->hw->dma->stop_tx(priv->ioaddr, chan); + stmmac_stop_tx(priv, 
priv->ioaddr, chan); } /** @@ -1807,19 +1807,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) for (chan = 0; chan < rx_channels_count; chan++) { qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; - priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, - rxfifosz, qmode); + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, + rxfifosz, qmode); } for (chan = 0; chan < tx_channels_count; chan++) { qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; - priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, - txfifosz, qmode); + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, + txfifosz, qmode); } } else { - priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, - rxfifosz); + stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz); } } @@ -1927,16 +1926,6 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) netif_tx_unlock(priv->dev); } -static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan) -{ - priv->hw->dma->enable_dma_irq(priv->ioaddr, chan); -} - -static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan) -{ - priv->hw->dma->disable_dma_irq(priv->ioaddr, chan); -} - /** * stmmac_tx_err - to manage the tx error * @priv: driver private structure @@ -2000,13 +1989,12 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, txfifosz /= tx_channels_count; if (priv->synopsys_id >= DWMAC_CORE_4_00) { - priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, - rxfifosz, rxqmode); - priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, - txfifosz, txqmode); + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, + rxqmode); + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, + txqmode); } else { - priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, - rxfifosz); + stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz); } } @@ -2050,16 +2038,15 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) * all tx queues rather than just a single tx queue. 
*/ for (chan = 0; chan < channels_to_check; chan++) - status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr, - &priv->xstats, - chan); + status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan); for (chan = 0; chan < rx_channel_count; chan++) { if (likely(status[chan] & handle_rx)) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); __napi_schedule(&rx_q->napi); poll_scheduled = true; } @@ -2080,7 +2067,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) &priv->rx_queue[0]; if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, chan); + stmmac_disable_dma_irq(priv, + priv->ioaddr, chan); __napi_schedule(&rx_q->napi); } break; @@ -2176,15 +2164,7 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv) */ static int stmmac_get_hw_features(struct stmmac_priv *priv) { - u32 ret = 0; - - if (priv->hw->dma->get_hw_feature) { - priv->hw->dma->get_hw_feature(priv->ioaddr, - &priv->dma_cap); - ret = 1; - } - - return ret; + return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; } /** @@ -2234,7 +2214,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) atds = 1; - ret = priv->hw->dma->reset(priv->ioaddr); + ret = stmmac_reset(priv, priv->ioaddr); if (ret) { dev_err(priv->device, "Failed to reset the dma\n"); return ret; @@ -2242,51 +2222,48 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) if (priv->synopsys_id >= DWMAC_CORE_4_00) { /* DMA Configuration */ - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, - dummy_dma_tx_phy, dummy_dma_rx_phy, atds); + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, + dummy_dma_tx_phy, dummy_dma_rx_phy, atds); /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_channels_count; chan++) { rx_q = &priv->rx_queue[chan]; - priv->hw->dma->init_rx_chan(priv->ioaddr, - priv->plat->dma_cfg, - rx_q->dma_rx_phy, chan); + stmmac_init_rx_chan(priv, priv->ioaddr, + priv->plat->dma_cfg, rx_q->dma_rx_phy, + chan); rx_q->rx_tail_addr = rx_q->dma_rx_phy + (DMA_RX_SIZE * sizeof(struct dma_desc)); - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, - rx_q->rx_tail_addr, - chan); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); } /* DMA TX Channel Configuration */ for (chan = 0; chan < tx_channels_count; chan++) { tx_q = &priv->tx_queue[chan]; - priv->hw->dma->init_chan(priv->ioaddr, - priv->plat->dma_cfg, - chan); + stmmac_init_chan(priv, priv->ioaddr, + priv->plat->dma_cfg, chan); - priv->hw->dma->init_tx_chan(priv->ioaddr, - priv->plat->dma_cfg, - tx_q->dma_tx_phy, chan); + stmmac_init_tx_chan(priv, priv->ioaddr, + priv->plat->dma_cfg, tx_q->dma_tx_phy, + chan); tx_q->tx_tail_addr = tx_q->dma_tx_phy + (DMA_TX_SIZE * sizeof(struct dma_desc)); - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, - tx_q->tx_tail_addr, - chan); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, chan); } } else { rx_q = &priv->rx_queue[chan]; tx_q = &priv->tx_queue[chan]; - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, - tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); } - if (priv->plat->axi && priv->hw->dma->axi) - priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); + if (priv->plat->axi) + stmmac_axi(priv, priv->ioaddr, 
priv->plat->axi); return ret; } @@ -2332,18 +2309,14 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv) u32 chan; /* set TX ring length */ - if (priv->hw->dma->set_tx_ring_len) { - for (chan = 0; chan < tx_channels_count; chan++) - priv->hw->dma->set_tx_ring_len(priv->ioaddr, - (DMA_TX_SIZE - 1), chan); - } + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_set_tx_ring_len(priv, priv->ioaddr, + (DMA_TX_SIZE - 1), chan); /* set RX ring length */ - if (priv->hw->dma->set_rx_ring_len) { - for (chan = 0; chan < rx_channels_count; chan++) - priv->hw->dma->set_rx_ring_len(priv->ioaddr, - (DMA_RX_SIZE - 1), chan); - } + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_set_rx_ring_len(priv, priv->ioaddr, + (DMA_RX_SIZE - 1), chan); } /** @@ -2619,9 +2592,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; - if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { - priv->rx_riwt = MAX_DMA_RIWT; - priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt); + if (priv->use_riwt) { + ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt); + if (!ret) + priv->rx_riwt = MAX_DMA_RIWT; } if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) @@ -2633,7 +2607,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Enable TSO */ if (priv->tso) { for (chan = 0; chan < tx_cnt; chan++) - priv->hw->dma->enable_tso(priv->ioaddr, 1, chan); + stmmac_enable_tso(priv, priv->ioaddr, 1, chan); } return 0; @@ -3058,8 +3032,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, - queue); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); return NETDEV_TX_OK; @@ -3271,10 +3244,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); if (priv->synopsys_id < DWMAC_CORE_4_00) - priv->hw->dma->enable_dma_transmission(priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr); else - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, - queue); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, + queue); return NETDEV_TX_OK; @@ -3608,7 +3581,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget) work_done = stmmac_rx(priv, budget, rx_q->queue_index); if (work_done < budget) { napi_complete_done(napi, work_done); - stmmac_enable_dma_irq(priv, chan); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan); } return work_done; } @@ -3778,11 +3751,11 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) priv->hw->mac->host_mtl_irq_status(priv->hw, queue); - if (status & CORE_IRQ_MTL_RX_OVERFLOW && - priv->hw->dma->set_rx_tail_ptr) - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, - rx_q->rx_tail_addr, - queue); + if (status & CORE_IRQ_MTL_RX_OVERFLOW) + stmmac_set_rx_tail_ptr(priv, + priv->ioaddr, + rx_q->rx_tail_addr, + queue); } } -- cgit v1.2.3 From c10d4c82a5c84c22e1bd6be75478853c28a69d71 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Mon, 16 Apr 2018 16:08:14 +0100 Subject: net: stmmac: Switch stmmac_ops to generic HW Interface Helpers Switch stmmac_ops to generic Hardware Interface Helpers instead of using hard-coded callbacks. This makes the code more readable and more flexible. No functional change. Signed-off-by: Jose Abreu Cc: David S. 
Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 70 ----------- drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 19 +-- drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 6 +- drivers/net/ethernet/stmicro/stmmac/hwif.h | 138 +++++++++++++++++++++ .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 68 ++++------ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 122 +++++++++--------- 6 files changed, 235 insertions(+), 188 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b27221bd358c..0e0b6f19104d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -383,76 +383,6 @@ extern const struct stmmac_desc_ops ndesc_ops; struct mac_device_info; -/* Helpers to program the MAC core */ -struct stmmac_ops { - /* MAC core initialization */ - void (*core_init)(struct mac_device_info *hw, struct net_device *dev); - /* Enable the MAC RX/TX */ - void (*set_mac)(void __iomem *ioaddr, bool enable); - /* Enable and verify that the IPC module is supported */ - int (*rx_ipc)(struct mac_device_info *hw); - /* Enable RX Queues */ - void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); - /* RX Queues Priority */ - void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); - /* TX Queues Priority */ - void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); - /* RX Queues Routing */ - void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, - u32 queue); - /* Program RX Algorithms */ - void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); - /* Program TX Algorithms */ - void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); - /* Set MTL TX queues weight */ - void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, - u32 weight, u32 queue); - /* RX MTL queue to RX dma mapping */ - void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); - /* Configure AV Algorithm */ - void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, - u32 idle_slope, u32 high_credit, u32 low_credit, - u32 queue); - /* Dump MAC registers */ - void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); - /* Handle extra events on specific interrupts hw dependent */ - int (*host_irq_status)(struct mac_device_info *hw, - struct stmmac_extra_stats *x); - /* Handle MTL interrupts */ - int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); - /* Multicast filter setting */ - void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); - /* Flow control setting */ - void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, - unsigned int fc, unsigned int pause_time, u32 tx_cnt); - /* Set power management mode (e.g. 
magic frame) */ - void (*pmt)(struct mac_device_info *hw, unsigned long mode); - /* Set/Get Unicast MAC addresses */ - void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr, - unsigned int reg_n); - void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, - unsigned int reg_n); - void (*set_eee_mode)(struct mac_device_info *hw, - bool en_tx_lpi_clockgating); - void (*reset_eee_mode)(struct mac_device_info *hw); - void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); - void (*set_eee_pls)(struct mac_device_info *hw, int link); - void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, - u32 rx_queues, u32 tx_queues); - /* PCS calls */ - void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, - bool loopback); - void (*pcs_rane)(void __iomem *ioaddr, bool restart); - void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); - /* Safety Features */ - int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp); - bool (*safety_feat_irq_status)(struct net_device *ndev, - void __iomem *ioaddr, unsigned int asp, - struct stmmac_safety_stats *stats); - const char *(*safety_feat_dump)(struct stmmac_safety_stats *stats, - int index, unsigned long *count); -}; - /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 860de39999c7..2978550bb7f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -237,15 +237,16 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp) return 0; } -bool dwmac5_safety_feat_irq_status(struct net_device *ndev, +int dwmac5_safety_feat_irq_status(struct net_device *ndev, void __iomem *ioaddr, unsigned int asp, struct stmmac_safety_stats *stats) { - bool ret = false, err, corr; + bool err, corr; u32 mtl, dma; + int ret = 0; if (!asp) - return false; + return -EINVAL; mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS); dma = readl(ioaddr + DMA_SAFETY_INT_STATUS); @@ -282,17 +283,19 @@ static const struct dwmac5_error { { dwmac5_dma_errors }, }; -const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, - int index, unsigned long *count) +int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc) { int module = index / 32, offset = index % 32; unsigned long *ptr = (unsigned long *)stats; if (module >= ARRAY_SIZE(dwmac5_all_errors)) - return NULL; + return -EINVAL; if (!dwmac5_all_errors[module].desc[offset].valid) - return NULL; + return -EINVAL; if (count) *count = *(ptr + index); - return dwmac5_all_errors[module].desc[offset].desc; + if (desc) + *desc = dwmac5_all_errors[module].desc[offset].desc; + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index a0d2c44711b9..bd4c466d303e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -43,10 +43,10 @@ #define DMA_ECC_INT_STATUS 0x00001088 int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); -bool dwmac5_safety_feat_irq_status(struct net_device *ndev, +int dwmac5_safety_feat_irq_status(struct net_device *ndev, void __iomem *ioaddr, unsigned int asp, struct stmmac_safety_stats *stats); -const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, - int index, unsigned long *count); +int 
dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc); #endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e1a9ae63f859..9575135c0699 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -228,4 +228,142 @@ struct stmmac_dma_ops { #define stmmac_enable_tso(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_tso, __args) +struct mac_device_info; +struct net_device; +struct rgmii_adv; +struct stmmac_safety_stats; + +/* Helpers to program the MAC core */ +struct stmmac_ops { + /* MAC core initialization */ + void (*core_init)(struct mac_device_info *hw, struct net_device *dev); + /* Enable the MAC RX/TX */ + void (*set_mac)(void __iomem *ioaddr, bool enable); + /* Enable and verify that the IPC module is supported */ + int (*rx_ipc)(struct mac_device_info *hw); + /* Enable RX Queues */ + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); + /* RX Queues Priority */ + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* TX Queues Priority */ + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* RX Queues Routing */ + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, + u32 queue); + /* Program RX Algorithms */ + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); + /* Program TX Algorithms */ + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); + /* Set MTL TX queues weight */ + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, + u32 weight, u32 queue); + /* RX MTL queue to RX dma mapping */ + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); + /* Configure AV Algorithm */ + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, + u32 idle_slope, u32 high_credit, u32 low_credit, + u32 queue); + /* Dump MAC registers */ + void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); + /* Handle extra events on specific interrupts hw dependent */ + int (*host_irq_status)(struct mac_device_info *hw, + struct stmmac_extra_stats *x); + /* Handle MTL interrupts */ + int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); + /* Multicast filter setting */ + void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); + /* Flow control setting */ + void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, u32 tx_cnt); + /* Set power management mode (e.g. 
magic frame) */ + void (*pmt)(struct mac_device_info *hw, unsigned long mode); + /* Set/Get Unicast MAC addresses */ + void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + unsigned int reg_n); + void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + unsigned int reg_n); + void (*set_eee_mode)(struct mac_device_info *hw, + bool en_tx_lpi_clockgating); + void (*reset_eee_mode)(struct mac_device_info *hw); + void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); + void (*set_eee_pls)(struct mac_device_info *hw, int link); + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues); + /* PCS calls */ + void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback); + void (*pcs_rane)(void __iomem *ioaddr, bool restart); + void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); + /* Safety Features */ + int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp); + int (*safety_feat_irq_status)(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats); + int (*safety_feat_dump)(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc); +}; + +#define stmmac_core_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, core_init, __args) +#define stmmac_mac_set(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_mac, __args) +#define stmmac_rx_ipc(__priv, __args...) \ + stmmac_do_callback(__priv, mac, rx_ipc, __args) +#define stmmac_rx_queue_enable(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_enable, __args) +#define stmmac_rx_queue_prio(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_prio, __args) +#define stmmac_tx_queue_prio(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, tx_queue_prio, __args) +#define stmmac_rx_queue_routing(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_routing, __args) +#define stmmac_prog_mtl_rx_algorithms(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, prog_mtl_rx_algorithms, __args) +#define stmmac_prog_mtl_tx_algorithms(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, prog_mtl_tx_algorithms, __args) +#define stmmac_set_mtl_tx_queue_weight(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __args) +#define stmmac_map_mtl_to_dma(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, map_mtl_to_dma, __args) +#define stmmac_config_cbs(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, config_cbs, __args) +#define stmmac_dump_mac_regs(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, dump_regs, __args) +#define stmmac_host_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, host_irq_status, __args) +#define stmmac_host_mtl_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, host_mtl_irq_status, __args) +#define stmmac_set_filter(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_filter, __args) +#define stmmac_flow_ctrl(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, flow_ctrl, __args) +#define stmmac_pmt(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pmt, __args) +#define stmmac_set_umac_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_umac_addr, __args) +#define stmmac_get_umac_addr(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, mac, get_umac_addr, __args) +#define stmmac_set_eee_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_mode, __args) +#define stmmac_reset_eee_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args) +#define stmmac_set_eee_timer(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_timer, __args) +#define stmmac_set_eee_pls(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_pls, __args) +#define stmmac_mac_debug(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, debug, __args) +#define stmmac_pcs_ctrl_ane(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args) +#define stmmac_pcs_rane(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_rane, __args) +#define stmmac_pcs_get_adv_lp(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args) +#define stmmac_safety_feat_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_config, __args) +#define stmmac_safety_feat_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args) +#define stmmac_safety_feat_dump(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_dump, __args) + #endif /* __STMMAC_HWIF_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 7009718dc7a6..6d82b3ef5c3b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -291,11 +291,9 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, cmd->base.speed = priv->xstats.pcs_speed; /* Get and convert ADV/LP_ADV from the HW AN registers */ - if (!priv->hw->mac->pcs_get_adv_lp) + if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv)) return -EOPNOTSUPP; /* should never happen indeed */ - priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv); - /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ ethtool_convert_link_mode_to_legacy_u32( @@ -393,11 +391,7 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, ADVERTISED_10baseT_Full); spin_lock(&priv->lock); - - if (priv->hw->mac->pcs_ctrl_ane) - priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1, - priv->hw->ps, 0); - + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); spin_unlock(&priv->lock); return 0; @@ -442,7 +436,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev, memset(reg_space, 0x0, REG_SPACE_SIZE); - priv->hw->mac->dump_regs(priv->hw, reg_space); + stmmac_dump_mac_regs(priv, priv->hw, reg_space); stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); /* Copy DMA registers to where ethtool expects them */ memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], @@ -454,15 +448,13 @@ stmmac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct stmmac_priv *priv = netdev_priv(netdev); + struct rgmii_adv adv_lp; pause->rx_pause = 0; pause->tx_pause = 0; - if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { - struct rgmii_adv adv_lp; - + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { pause->autoneg = 1; - priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); if (!adv_lp.pause) return; } else { @@ -488,12 +480,10 @@ stmmac_set_pauseparam(struct net_device *netdev, u32 tx_cnt = priv->plat->tx_queues_to_use; struct phy_device *phy = netdev->phydev; int new_pause = FLOW_OFF; + struct rgmii_adv adv_lp; - if (priv->hw->pcs && 
priv->hw->mac->pcs_get_adv_lp) { - struct rgmii_adv adv_lp; - + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { pause->autoneg = 1; - priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); if (!adv_lp.pause) return -EOPNOTSUPP; } else { @@ -515,27 +505,24 @@ stmmac_set_pauseparam(struct net_device *netdev, return phy_start_aneg(phy); } - priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl, - priv->pause, tx_cnt); + stmmac_flow_ctrl(priv, priv->hw, phy->duplex, priv->flow_ctrl, + priv->pause, tx_cnt); return 0; } static void stmmac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *data) { - const char *(*dump)(struct stmmac_safety_stats *stats, int index, - unsigned long *count); struct stmmac_priv *priv = netdev_priv(dev); u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 tx_queues_count = priv->plat->tx_queues_to_use; unsigned long count; int i, j = 0, ret; - if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { - dump = priv->hw->mac->safety_feat_dump; - + if (priv->dma_cap.asp) { for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { - if (dump(&priv->sstats, i, &count)) + if (!stmmac_safety_feat_dump(priv, &priv->sstats, i, + &count, NULL)) data[j++] = count; } } @@ -563,11 +550,10 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, priv->xstats.phy_eee_wakeup_error_n = val; } - if ((priv->hw->mac->debug) && - (priv->synopsys_id >= DWMAC_CORE_3_50)) - priv->hw->mac->debug(priv->ioaddr, - (void *)&priv->xstats, - rx_queues_count, tx_queues_count); + if (priv->synopsys_id >= DWMAC_CORE_3_50) + stmmac_mac_debug(priv, priv->ioaddr, + (void *)&priv->xstats, + rx_queues_count, tx_queues_count); } for (i = 0; i < STMMAC_STATS_LEN; i++) { char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; @@ -579,8 +565,6 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, static int stmmac_get_sset_count(struct net_device *netdev, int sset) { struct stmmac_priv *priv = netdev_priv(netdev); - const char *(*dump)(struct stmmac_safety_stats *stats, int index, - unsigned long *count); int i, len, safety_len = 0; switch (sset) { @@ -589,11 +573,11 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset) if (priv->dma_cap.rmon) len += STMMAC_MMC_STATS_LEN; - if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { - dump = priv->hw->mac->safety_feat_dump; - + if (priv->dma_cap.asp) { for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { - if (dump(&priv->sstats, i, NULL)) + if (!stmmac_safety_feat_dump(priv, + &priv->sstats, i, + NULL, NULL)) safety_len++; } @@ -611,17 +595,15 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) int i; u8 *p = data; struct stmmac_priv *priv = netdev_priv(dev); - const char *(*dump)(struct stmmac_safety_stats *stats, int index, - unsigned long *count); switch (stringset) { case ETH_SS_STATS: - if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { - dump = priv->hw->mac->safety_feat_dump; + if (priv->dma_cap.asp) { for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { - const char *desc = dump(&priv->sstats, i, NULL); - - if (desc) { + const char *desc; + if (!stmmac_safety_feat_dump(priv, + &priv->sstats, i, + NULL, &desc)) { memcpy(p, desc, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7f2aeaede682..2b521035185d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -336,8 +336,8 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv) /* Check and enter in LPI mode */ if (!priv->tx_path_in_lpi_mode) - priv->hw->mac->set_eee_mode(priv->hw, - priv->plat->en_tx_lpi_clockgating); + stmmac_set_eee_mode(priv, priv->hw, + priv->plat->en_tx_lpi_clockgating); } /** @@ -348,7 +348,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv) */ void stmmac_disable_eee_mode(struct stmmac_priv *priv) { - priv->hw->mac->reset_eee_mode(priv->hw); + stmmac_reset_eee_mode(priv, priv->hw); del_timer_sync(&priv->eee_ctrl_timer); priv->tx_path_in_lpi_mode = false; } @@ -411,8 +411,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv) if (priv->eee_active) { netdev_dbg(priv->dev, "disable EEE\n"); del_timer_sync(&priv->eee_ctrl_timer); - priv->hw->mac->set_eee_timer(priv->hw, 0, - tx_lpi_timer); + stmmac_set_eee_timer(priv, priv->hw, 0, + tx_lpi_timer); } priv->eee_active = 0; spin_unlock_irqrestore(&priv->lock, flags); @@ -427,12 +427,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); - priv->hw->mac->set_eee_timer(priv->hw, - STMMAC_DEFAULT_LIT_LS, - tx_lpi_timer); + stmmac_set_eee_timer(priv, priv->hw, + STMMAC_DEFAULT_LIT_LS, tx_lpi_timer); } /* Set HW EEE according to the speed */ - priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link); + stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link); ret = true; spin_unlock_irqrestore(&priv->lock, flags); @@ -796,8 +795,8 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) { u32 tx_cnt = priv->plat->tx_queues_to_use; - priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl, - priv->pause, tx_cnt); + stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, + priv->pause, tx_cnt); } /** @@ -1663,7 +1662,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) for (queue = 0; queue < rx_queues_count; queue++) { mode = priv->plat->rx_queues_cfg[queue].mode_to_use; - priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); + stmmac_rx_queue_enable(priv, priv->hw, mode, queue); } } @@ -2000,18 +1999,19 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) { - bool ret = false; + int ret = false; /* Safety features are only available in cores >= 5.10 */ if (priv->synopsys_id < DWMAC_CORE_5_10) return ret; - if (priv->hw->mac->safety_feat_irq_status) - ret = priv->hw->mac->safety_feat_irq_status(priv->dev, - priv->ioaddr, priv->dma_cap.asp, &priv->sstats); - - if (ret) + ret = stmmac_safety_feat_irq_status(priv, priv->dev, + priv->ioaddr, priv->dma_cap.asp, &priv->sstats); + if (ret && (ret != -EINVAL)) { stmmac_global_err(priv); - return ret; + return true; + } + + return false; } /** @@ -2177,8 +2177,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv) static void stmmac_check_ether_addr(struct stmmac_priv *priv) { if (!is_valid_ether_addr(priv->dev->dev_addr)) { - priv->hw->mac->get_umac_addr(priv->hw, - priv->dev->dev_addr, 0); + stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); if (!is_valid_ether_addr(priv->dev->dev_addr)) eth_hw_addr_random(priv->dev); netdev_info(priv->dev, "device MAC address %pM\n", @@ -2332,7 +2331,7 @@ static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) for (queue = 0; queue < tx_queues_count; queue++) { weight = priv->plat->tx_queues_cfg[queue].weight; - priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, 
weight, queue); + stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); } } @@ -2353,7 +2352,7 @@ static void stmmac_configure_cbs(struct stmmac_priv *priv) if (mode_to_use == MTL_QUEUE_DCB) continue; - priv->hw->mac->config_cbs(priv->hw, + stmmac_config_cbs(priv, priv->hw, priv->plat->tx_queues_cfg[queue].send_slope, priv->plat->tx_queues_cfg[queue].idle_slope, priv->plat->tx_queues_cfg[queue].high_credit, @@ -2375,7 +2374,7 @@ static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) for (queue = 0; queue < rx_queues_count; queue++) { chan = priv->plat->rx_queues_cfg[queue].chan; - priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan); + stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); } } @@ -2395,7 +2394,7 @@ static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) continue; prio = priv->plat->rx_queues_cfg[queue].prio; - priv->hw->mac->rx_queue_prio(priv->hw, prio, queue); + stmmac_rx_queue_prio(priv, priv->hw, prio, queue); } } @@ -2415,7 +2414,7 @@ static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) continue; prio = priv->plat->tx_queues_cfg[queue].prio; - priv->hw->mac->tx_queue_prio(priv->hw, prio, queue); + stmmac_tx_queue_prio(priv, priv->hw, prio, queue); } } @@ -2436,7 +2435,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) continue; packet = priv->plat->rx_queues_cfg[queue].pkt_route; - priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); + stmmac_rx_queue_routing(priv, priv->hw, packet, queue); } } @@ -2450,50 +2449,47 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv) u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 tx_queues_count = priv->plat->tx_queues_to_use; - if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight) + if (tx_queues_count > 1) stmmac_set_tx_queue_weight(priv); /* Configure MTL RX algorithms */ - if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms) - priv->hw->mac->prog_mtl_rx_algorithms(priv->hw, - priv->plat->rx_sched_algorithm); + if (rx_queues_count > 1) + stmmac_prog_mtl_rx_algorithms(priv, priv->hw, + priv->plat->rx_sched_algorithm); /* Configure MTL TX algorithms */ - if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms) - priv->hw->mac->prog_mtl_tx_algorithms(priv->hw, - priv->plat->tx_sched_algorithm); + if (tx_queues_count > 1) + stmmac_prog_mtl_tx_algorithms(priv, priv->hw, + priv->plat->tx_sched_algorithm); /* Configure CBS in AVB TX queues */ - if (tx_queues_count > 1 && priv->hw->mac->config_cbs) + if (tx_queues_count > 1) stmmac_configure_cbs(priv); /* Map RX MTL to DMA channels */ - if (priv->hw->mac->map_mtl_to_dma) - stmmac_rx_queue_dma_chan_map(priv); + stmmac_rx_queue_dma_chan_map(priv); /* Enable MAC RX Queues */ - if (priv->hw->mac->rx_queue_enable) - stmmac_mac_enable_rx_queues(priv); + stmmac_mac_enable_rx_queues(priv); /* Set RX priorities */ - if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio) + if (rx_queues_count > 1) stmmac_mac_config_rx_queues_prio(priv); /* Set TX priorities */ - if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio) + if (tx_queues_count > 1) stmmac_mac_config_tx_queues_prio(priv); /* Set RX routing */ - if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing) + if (rx_queues_count > 1) stmmac_mac_config_rx_queues_routing(priv); } static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) { - if (priv->hw->mac->safety_feat_config && priv->dma_cap.asp) { + if (priv->dma_cap.asp) { netdev_info(priv->dev, "Enabling Safety Features\n"); - 
priv->hw->mac->safety_feat_config(priv->ioaddr, - priv->dma_cap.asp); + stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp); } else { netdev_info(priv->dev, "No Safety Features support found\n"); } @@ -2528,7 +2524,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } /* Copy the MAC addr into the HW */ - priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); + stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); /* PS and related bits will be programmed according to the speed */ if (priv->hw->pcs) { @@ -2544,7 +2540,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } /* Initialize the MAC Core */ - priv->hw->mac->core_init(priv->hw, dev); + stmmac_core_init(priv, priv->hw, dev); /* Initialize MTL*/ if (priv->synopsys_id >= DWMAC_CORE_4_00) @@ -2554,7 +2550,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (priv->synopsys_id >= DWMAC_CORE_5_10) stmmac_safety_feat_configuration(priv); - ret = priv->hw->mac->rx_ipc(priv->hw); + ret = stmmac_rx_ipc(priv, priv->hw); if (!ret) { netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); priv->plat->rx_coe = STMMAC_RX_COE_NONE; @@ -2562,7 +2558,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } /* Enable the MAC Rx/Tx */ - priv->hw->mac->set_mac(priv->ioaddr, true); + stmmac_mac_set(priv, priv->ioaddr, true); /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); @@ -2598,8 +2594,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->rx_riwt = MAX_DMA_RIWT; } - if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) - priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); + if (priv->hw->pcs) + stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0); /* set TX and RX rings length */ stmmac_set_rings_length(priv); @@ -2778,7 +2774,7 @@ static int stmmac_release(struct net_device *dev) free_dma_desc_resources(priv); /* Disable the MAC Rx/Tx */ - priv->hw->mac->set_mac(priv->ioaddr, false); + stmmac_mac_set(priv, priv->ioaddr, false); netif_carrier_off(dev); @@ -3614,7 +3610,7 @@ static void stmmac_set_rx_mode(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - priv->hw->mac->set_filter(priv->hw, dev); + stmmac_set_filter(priv, priv->hw, dev); } /** @@ -3687,7 +3683,7 @@ static int stmmac_set_features(struct net_device *netdev, /* No check needed because rx_coe has been set before and it will be * fixed in case of issue. 
*/ - priv->hw->mac->rx_ipc(priv->hw); + stmmac_rx_ipc(priv, priv->hw); return 0; } @@ -3731,8 +3727,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) /* To handle GMAC own interrupts */ if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { - int status = priv->hw->mac->host_irq_status(priv->hw, - &priv->xstats); + int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); if (unlikely(status)) { /* For LPI we need to save the tx status */ @@ -3747,9 +3742,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - status |= - priv->hw->mac->host_mtl_irq_status(priv->hw, - queue); + status |= stmmac_host_mtl_irq_status(priv, + priv->hw, queue); if (status & CORE_IRQ_MTL_RX_OVERFLOW) stmmac_set_rx_tail_ptr(priv, @@ -3829,7 +3823,7 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr) if (ret) return ret; - priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0); + stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); return ret; } @@ -4418,7 +4412,7 @@ int stmmac_dvr_remove(struct device *dev) stmmac_stop_all_dma(priv); - priv->hw->mac->set_mac(priv->ioaddr, false); + stmmac_mac_set(priv, priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); if (priv->plat->stmmac_rst) @@ -4467,10 +4461,10 @@ int stmmac_suspend(struct device *dev) /* Enable Power down mode by programming the PMT regs */ if (device_may_wakeup(priv->device)) { - priv->hw->mac->pmt(priv->hw, priv->wolopts); + stmmac_pmt(priv, priv->hw, priv->wolopts); priv->irq_wake = 1; } else { - priv->hw->mac->set_mac(priv->ioaddr, false); + stmmac_mac_set(priv, priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ clk_disable(priv->plat->pclk); @@ -4534,7 +4528,7 @@ int stmmac_resume(struct device *dev) */ if (device_may_wakeup(priv->device)) { spin_lock_irqsave(&priv->lock, flags); - priv->hw->mac->pmt(priv->hw, 0); + stmmac_pmt(priv, priv->hw, 0); spin_unlock_irqrestore(&priv->lock, flags); priv->irq_wake = 0; } else { -- cgit v1.2.3 From cc4c9001ce31e0c3933fc67ac8c72899d6584b12 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Mon, 16 Apr 2018 16:08:15 +0100 Subject: net: stmmac: Switch stmmac_hwtimestamp to generic HW Interface Helpers Switch stmmac_hwtimestamp to generic Hardware Interface Helpers instead of using hard-coded callbacks. This makes the code more readable and more flexible. No functional change. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 12 -------- drivers/net/ethernet/stmicro/stmmac/hwif.h | 25 ++++++++++++++++ .../net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 34 ++++++++++++---------- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 17 +++++------ drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 18 ++++-------- 5 files changed, 56 insertions(+), 50 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 0e0b6f19104d..7291561ad217 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -383,18 +383,6 @@ extern const struct stmmac_desc_ops ndesc_ops; struct mac_device_info; -/* PTP and HW Timer helpers */ -struct stmmac_hwtimestamp { - void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); - u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, - int gmac4); - int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); - int (*config_addend) (void __iomem *ioaddr, u32 addend); - int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, - int add_sub, int gmac4); - u64(*get_systime) (void __iomem *ioaddr); -}; - extern const struct stmmac_hwtimestamp stmmac_ptp; extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 9575135c0699..e23c0a3448bb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -366,4 +366,29 @@ struct stmmac_ops { #define stmmac_safety_feat_dump(__priv, __args...) \ stmmac_do_callback(__priv, mac, safety_feat_dump, __args) +/* PTP and HW Timer helpers */ +struct stmmac_hwtimestamp { + void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); + void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc); + int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); + int (*config_addend) (void __iomem *ioaddr, u32 addend); + int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4); + void (*get_systime) (void __iomem *ioaddr, u64 *systime); +}; + +#define stmmac_config_hw_tstamping(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, config_hw_tstamping, __args) +#define stmmac_config_sub_second_increment(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, config_sub_second_increment, __args) +#define stmmac_init_systime(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, init_systime, __args) +#define stmmac_config_addend(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, config_addend, __args) +#define stmmac_adjust_systime(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, adjust_systime, __args) +#define stmmac_get_systime(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, ptp, get_systime, __args) + #endif /* __STMMAC_HWIF_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 08c19ebd5306..8d9cc2157afd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -24,13 +24,13 @@ #include "common.h" #include "stmmac_ptp.h" -static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data) +static void config_hw_tstamping(void __iomem *ioaddr, u32 data) { writel(data, ioaddr + PTP_TCR); } -static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, - u32 ptp_clock, int gmac4) +static void config_sub_second_increment(void __iomem *ioaddr, + u32 ptp_clock, int gmac4, u32 *ssinc) { u32 value = readl(ioaddr + PTP_TCR); unsigned long data; @@ -57,10 +57,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, writel(reg_value, ioaddr + PTP_SSIR); - return data; + if (ssinc) + *ssinc = data; } -static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) +static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) { int limit; u32 value; @@ -85,7 +86,7 @@ static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) return 0; } -static int stmmac_config_addend(void __iomem *ioaddr, u32 addend) +static int config_addend(void __iomem *ioaddr, u32 addend) { u32 value; int limit; @@ -109,8 +110,8 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend) return 0; } -static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, - int add_sub, int gmac4) +static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4) { u32 value; int limit; @@ -152,7 +153,7 @@ static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, return 0; } -static u64 stmmac_get_systime(void __iomem *ioaddr) +static void get_systime(void __iomem *ioaddr, u64 *systime) { u64 ns; @@ -161,14 +162,15 @@ static u64 stmmac_get_systime(void __iomem *ioaddr) /* Get the TSS and convert sec time value to nanosecond */ ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; - return ns; + if (systime) + *systime = ns; } const struct stmmac_hwtimestamp stmmac_ptp = { - .config_hw_tstamping = stmmac_config_hw_tstamping, - .init_systime = stmmac_init_systime, - .config_sub_second_increment = stmmac_config_sub_second_increment, - .config_addend = stmmac_config_addend, - .adjust_systime = stmmac_adjust_systime, - .get_systime = stmmac_get_systime, + .config_hw_tstamping = config_hw_tstamping, + .init_systime = init_systime, + .config_sub_second_increment = config_sub_second_increment, + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 2b521035185d..896a1e76f2ec 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -707,18 +707,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; if (!priv->hwts_tx_en && !priv->hwts_rx_en) - priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0); + stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); else { value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | tstamp_all | ptp_v2 | ptp_over_ethernet | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | 
ts_master_en | snap_type_sel); - priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value); + stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); /* program Sub Second Increment reg */ - sec_inc = priv->hw->ptp->config_sub_second_increment( - priv->ptpaddr, priv->plat->clk_ptp_rate, - priv->plat->has_gmac4); + stmmac_config_sub_second_increment(priv, + priv->ptpaddr, priv->plat->clk_ptp_rate, + priv->plat->has_gmac4, &sec_inc); temp = div_u64(1000000000ULL, sec_inc); /* calculate default added value: @@ -728,15 +728,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) */ temp = (u64)(temp << 32); priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); - priv->hw->ptp->config_addend(priv->ptpaddr, - priv->default_addend); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); /* initialize system time */ ktime_get_real_ts64(&now); /* lower 32 bits of tv_sec are safe until y2106 */ - priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec, - now.tv_nsec); + stmmac_init_systime(priv, priv->ptpaddr, + (u32)now.tv_sec, now.tv_nsec); } return copy_to_user(ifr->ifr_data, &config, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index e471a903c654..7d3a5c7f5db6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -49,9 +49,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) addend = neg_adj ? (addend - diff) : (addend + diff); spin_lock_irqsave(&priv->ptp_lock, flags); - - priv->hw->ptp->config_addend(priv->ptpaddr, addend); - + stmmac_config_addend(priv, priv->ptpaddr, addend); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; @@ -84,10 +82,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) nsec = reminder; spin_lock_irqsave(&priv->ptp_lock, flags); - - priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj, - priv->plat->has_gmac4); - + stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, + priv->plat->has_gmac4); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; @@ -110,9 +106,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) u64 ns; spin_lock_irqsave(&priv->ptp_lock, flags); - - ns = priv->hw->ptp->get_systime(priv->ptpaddr); - + stmmac_get_systime(priv, priv->ptpaddr, &ns); spin_unlock_irqrestore(&priv->ptp_lock, flags); *ts = ns_to_timespec64(ns); @@ -137,9 +131,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp, unsigned long flags; spin_lock_irqsave(&priv->ptp_lock, flags); - - priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec); - + stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; -- cgit v1.2.3 From 2c520b1c9cfa7d43fd251fa47bac6bd953ec8239 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Mon, 16 Apr 2018 16:08:16 +0100 Subject: net: stmmac: Switch stmmac_mode_ops to generic HW Interface Helpers Switch stmmac_mode_ops to generic Hardware Interface Helpers instead of using hard-coded callbacks. This makes the code more readable and more flexible. No functional change. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 20 +++++------ drivers/net/ethernet/stmicro/stmmac/common.h | 12 ------- drivers/net/ethernet/stmicro/stmmac/hwif.h | 27 ++++++++++++++ drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 24 ++++++------- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 43 ++++++++++------------- 5 files changed, 68 insertions(+), 58 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index ca0f9c205f9d..b9c9003060c5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -24,7 +24,7 @@ #include "stmmac.h" -static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +static int jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; unsigned int nopaged_len = skb_headlen(skb); @@ -93,7 +93,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return entry; } -static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) +static unsigned int is_jumbo_frm(int len, int enh_desc) { unsigned int ret = 0; @@ -105,7 +105,7 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) return ret; } -static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr, +static void init_dma_chain(void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc) { /* @@ -135,7 +135,7 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr, } } -static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) +static void refill_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr; struct stmmac_priv *priv = rx_q->priv_data; @@ -151,7 +151,7 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) sizeof(struct dma_desc))); } -static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) +static void clean_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; struct stmmac_priv *priv = tx_q->priv_data; @@ -169,9 +169,9 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) } const struct stmmac_mode_ops chain_mode_ops = { - .init = stmmac_init_dma_chain, - .is_jumbo_frm = stmmac_is_jumbo_frm, - .jumbo_frm = stmmac_jumbo_frm, - .refill_desc3 = stmmac_refill_desc3, - .clean_desc3 = stmmac_clean_desc3, + .init = init_dma_chain, + .is_jumbo_frm = is_jumbo_frm, + .jumbo_frm = jumbo_frm, + .refill_desc3 = refill_desc3, + .clean_desc3 = clean_desc3, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7291561ad217..59673c6d2e52 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -405,18 +405,6 @@ struct mii_regs { unsigned int clk_csr_mask; }; -/* Helpers to manage the descriptors for chain and ring modes */ -struct stmmac_mode_ops { - void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, - unsigned int extend_desc); - unsigned int (*is_jumbo_frm) (int len, int ehn_desc); - int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); - int (*set_16kib_bfsize)(int mtu); - void (*init_desc3)(struct dma_desc *p); - void (*refill_desc3) (void *priv, struct dma_desc *p); - void (*clean_desc3) (void *priv, struct dma_desc *p); -}; - struct mac_device_info { const struct stmmac_ops *mac; const struct stmmac_desc_ops *desc; 
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e23c0a3448bb..f81ded4a9946 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -391,4 +391,31 @@ struct stmmac_hwtimestamp { #define stmmac_get_systime(__priv, __args...) \ stmmac_do_void_callback(__priv, ptp, get_systime, __args) +/* Helpers to manage the descriptors for chain and ring modes */ +struct stmmac_mode_ops { + void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, + unsigned int extend_desc); + unsigned int (*is_jumbo_frm) (int len, int ehn_desc); + int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); + int (*set_16kib_bfsize)(int mtu); + void (*init_desc3)(struct dma_desc *p); + void (*refill_desc3) (void *priv, struct dma_desc *p); + void (*clean_desc3) (void *priv, struct dma_desc *p); +}; + +#define stmmac_mode_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, init, __args) +#define stmmac_is_jumbo_frm(__priv, __args...) \ + stmmac_do_callback(__priv, mode, is_jumbo_frm, __args) +#define stmmac_jumbo_frm(__priv, __args...) \ + stmmac_do_callback(__priv, mode, jumbo_frm, __args) +#define stmmac_set_16kib_bfsize(__priv, __args...) \ + stmmac_do_callback(__priv, mode, set_16kib_bfsize, __args) +#define stmmac_init_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, init_desc3, __args) +#define stmmac_refill_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, refill_desc3, __args) +#define stmmac_clean_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, clean_desc3, __args) + #endif /* __STMMAC_HWIF_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 808ca758d0ae..a7ffc73fffe8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -24,7 +24,7 @@ #include "stmmac.h" -static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +static int jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; unsigned int nopaged_len = skb_headlen(skb); @@ -99,7 +99,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return entry; } -static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) +static unsigned int is_jumbo_frm(int len, int enh_desc) { unsigned int ret = 0; @@ -109,7 +109,7 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) return ret; } -static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) +static void refill_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; @@ -119,12 +119,12 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) } /* In ring mode we need to fill the desc3 because it is used as buffer */ -static void stmmac_init_desc3(struct dma_desc *p) +static void init_desc3(struct dma_desc *p) { p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); } -static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) +static void clean_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; struct stmmac_priv *priv = tx_q->priv_data; @@ -137,7 +137,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) p->des3 = 0; } -static int stmmac_set_16kib_bfsize(int mtu) +static int set_16kib_bfsize(int mtu) { int ret = 0; if (unlikely(mtu >= 
BUF_SIZE_8KiB)) @@ -146,10 +146,10 @@ static int stmmac_set_16kib_bfsize(int mtu) } const struct stmmac_mode_ops ring_mode_ops = { - .is_jumbo_frm = stmmac_is_jumbo_frm, - .jumbo_frm = stmmac_jumbo_frm, - .refill_desc3 = stmmac_refill_desc3, - .init_desc3 = stmmac_init_desc3, - .clean_desc3 = stmmac_clean_desc3, - .set_16kib_bfsize = stmmac_set_16kib_bfsize, + .is_jumbo_frm = is_jumbo_frm, + .jumbo_frm = jumbo_frm, + .refill_desc3 = refill_desc3, + .init_desc3 = init_desc3, + .clean_desc3 = clean_desc3, + .set_16kib_bfsize = set_16kib_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 896a1e76f2ec..90363a8b15db 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1161,9 +1161,8 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, else p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); - if ((priv->hw->mode->init_desc3) && - (priv->dma_buf_sz == BUF_SIZE_16KiB)) - priv->hw->mode->init_desc3(p); + if (priv->dma_buf_sz == BUF_SIZE_16KiB) + stmmac_init_desc3(priv, p); return 0; } @@ -1229,13 +1228,14 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) { struct stmmac_priv *priv = netdev_priv(dev); u32 rx_count = priv->plat->rx_queues_to_use; - unsigned int bfsize = 0; int ret = -ENOMEM; + int bfsize = 0; int queue; int i; - if (priv->hw->mode->set_16kib_bfsize) - bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); + bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); + if (bfsize < 0) + bfsize = 0; if (bfsize < BUF_SIZE_16KiB) bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); @@ -1279,13 +1279,11 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) - priv->hw->mode->init(rx_q->dma_erx, - rx_q->dma_rx_phy, - DMA_RX_SIZE, 1); + stmmac_mode_init(priv, rx_q->dma_erx, + rx_q->dma_rx_phy, DMA_RX_SIZE, 1); else - priv->hw->mode->init(rx_q->dma_rx, - rx_q->dma_rx_phy, - DMA_RX_SIZE, 0); + stmmac_mode_init(priv, rx_q->dma_rx, + rx_q->dma_rx_phy, DMA_RX_SIZE, 0); } } @@ -1332,13 +1330,11 @@ static int init_dma_tx_desc_rings(struct net_device *dev) /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) - priv->hw->mode->init(tx_q->dma_etx, - tx_q->dma_tx_phy, - DMA_TX_SIZE, 1); + stmmac_mode_init(priv, tx_q->dma_etx, + tx_q->dma_tx_phy, DMA_TX_SIZE, 1); else - priv->hw->mode->init(tx_q->dma_tx, - tx_q->dma_tx_phy, - DMA_TX_SIZE, 0); + stmmac_mode_init(priv, tx_q->dma_tx, + tx_q->dma_tx_phy, DMA_TX_SIZE, 0); } for (i = 0; i < DMA_TX_SIZE; i++) { @@ -1886,8 +1882,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) tx_q->tx_skbuff_dma[entry].map_as_page = false; } - if (priv->hw->mode->clean_desc3) - priv->hw->mode->clean_desc3(tx_q, p); + stmmac_clean_desc3(priv, tx_q, p); tx_q->tx_skbuff_dma[entry].last_segment = false; tx_q->tx_skbuff_dma[entry].is_jumbo = false; @@ -3099,11 +3094,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) - is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); + is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); if (unlikely(is_jumbo) && likely(priv->synopsys_id < DWMAC_CORE_4_00)) { - entry = priv->hw->mode->jumbo_frm(tx_q, 
skb, csum_insertion); + entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); if (unlikely(entry < 0)) goto dma_map_err; } @@ -3332,8 +3327,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) } else { p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); } - if (priv->hw->mode->refill_desc3) - priv->hw->mode->refill_desc3(rx_q, p); + + stmmac_refill_desc3(priv, rx_q, p); if (rx_q->rx_zeroc_thresh > 0) rx_q->rx_zeroc_thresh--; -- cgit v1.2.3 From 8d98aa39b8d497e4c3da8c3973456ff710b68693 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Mon, 16 Apr 2018 21:38:27 +0200 Subject: r8169: replace magic numbers with PCI MRRS constant Replace magic number "0x5 << MAX_READ_REQUEST_SHIFT" with the appropriate constant as defined in PCI core. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 39 ++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 604ae78381ae..7d4e68907055 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -84,7 +84,6 @@ The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; -#define MAX_READ_REQUEST_SHIFT 12 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -5125,7 +5124,7 @@ static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) @@ -5151,7 +5150,7 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) RTL_W8(tp, MaxTxPacketSize, 0x0c); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) @@ -5163,7 +5162,7 @@ static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) { rtl_tx_performance_tweak(tp, - (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); + PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN); } static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) @@ -5736,7 +5735,7 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp) RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); if (tp->dev->mtu <= ETH_DATA_LEN) { - rtl_tx_performance_tweak(tp, (0x5 << MAX_READ_REQUEST_SHIFT) | + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN); } } @@ -5757,7 +5756,7 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_disable_clock_request(tp); @@ -5788,7 +5787,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); if (tp->dev->mtu <= ETH_DATA_LEN) - 
rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } @@ -5805,7 +5804,7 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) RTL_W8(tp, MaxTxPacketSize, TxPacketMax); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } @@ -5862,7 +5861,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) RTL_W8(tp, MaxTxPacketSize, TxPacketMax); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } @@ -5872,7 +5871,7 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, MaxTxPacketSize, TxPacketMax); @@ -5889,7 +5888,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, MaxTxPacketSize, TxPacketMax); @@ -5921,7 +5920,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, MaxTxPacketSize, TxPacketMax); @@ -5946,7 +5945,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); @@ -5976,7 +5975,7 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) { rtl_csi_access_enable_2(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); @@ -6047,7 +6046,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); @@ -6147,7 +6146,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); @@ -6229,7 +6228,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + 
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); @@ -6495,7 +6494,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) RTL_W8(tp, DBG_REG, FIX_NAK_1); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, Config1, LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); @@ -6512,7 +6511,7 @@ static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { rtl_csi_access_enable_2(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -6575,7 +6574,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); -- cgit v1.2.3 From 897ddc24835ac9e267d70f1a77e75d30a0a636e9 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 16 Apr 2018 23:30:53 -0700 Subject: liquidio: Enhanced ethtool stats 1. Added red_drops stat: inbound packets dropped by RED due to buffer exhaustion 2. Included fcs_err, jabber_err, l2_err and frame_err errors under rx_errors 3. Included fifo_err, dmac_drop, red_drops, fw_err_pko, fw_err_link and fw_err_drop under rx_dropped 4. Included max_collision_fail, max_deferral_fail, total_collisions, fw_err_pko, fw_err_link, fw_err_drop and fw_err_pki under tx_dropped 5. Count DMA mapping errors 6. Added descriptions for some firmware stats and removed them for others Signed-off-by: Intiyaz Basha Acked-by: Derek Chickles Acked-by: Satanand Burla Signed-off-by: Felix Manlunas Signed-off-by: David S.
Miller --- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 54 ++++++++++------ drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 + .../net/ethernet/cavium/liquidio/liquidio_common.h | 75 ++++++++++++++-------- drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 4 +- 4 files changed, 86 insertions(+), 49 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 550ac29682a5..9926a12dd805 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -96,11 +96,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "tx_packets", "rx_bytes", "tx_bytes", - "rx_errors", /*jabber_err+l2_err+frame_err */ - "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */ - "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd + - *st->fromwire.dmac_drop + st->fromwire.fw_err_drop - */ + "rx_errors", + "tx_errors", + "rx_dropped", "tx_dropped", "tx_total_sent", @@ -119,7 +117,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "mac_tx_total_bytes", "mac_tx_mcast_pkts", "mac_tx_bcast_pkts", - "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */ + "mac_tx_ctl_packets", "mac_tx_total_collisions", "mac_tx_one_collision", "mac_tx_multi_collison", @@ -170,17 +168,17 @@ static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { "tx_packets", "rx_bytes", "tx_bytes", - "rx_errors", /* jabber_err + l2_err+frame_err */ - "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */ - "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */ + "rx_errors", + "tx_errors", + "rx_dropped", "tx_dropped", "link_state_changes", }; /* statistics of host tx queue */ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { - "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/ - "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/ + "packets", + "bytes", "dropped", "iq_busy", "sgentry_sent", @@ -197,13 +195,9 @@ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { /* statistics of host rx queue */ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { - "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */ - "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */ - "dropped", /*oct->droq[oq_no]->stats.rx_dropped+ - *oct->droq[oq_no]->stats.dropped_nodispatch+ - *oct->droq[oq_no]->stats.dropped_toomany+ - *oct->droq[oq_no]->stats.dropped_nomem - */ + "packets", + "bytes", + "dropped", "dropped_nomem", "dropped_toomany", "fw_dropped", @@ -1080,16 +1074,33 @@ lio_get_ethtool_stats(struct net_device *netdev, data[i++] = CVM_CAST64(netstats->rx_bytes); /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ data[i++] = CVM_CAST64(netstats->tx_bytes); - data[i++] = CVM_CAST64(netstats->rx_errors); + data[i++] = CVM_CAST64(netstats->rx_errors + + oct_dev->link_stats.fromwire.fcs_err + + oct_dev->link_stats.fromwire.jabber_err + + oct_dev->link_stats.fromwire.l2_err + + oct_dev->link_stats.fromwire.frame_err); data[i++] = CVM_CAST64(netstats->tx_errors); /*sum of oct->droq[oq_no]->stats->rx_dropped + *oct->droq[oq_no]->stats->dropped_nodispatch + *oct->droq[oq_no]->stats->dropped_toomany + *oct->droq[oq_no]->stats->dropped_nomem */ - data[i++] = CVM_CAST64(netstats->rx_dropped); + data[i++] = CVM_CAST64(netstats->rx_dropped + + oct_dev->link_stats.fromwire.fifo_err + + oct_dev->link_stats.fromwire.dmac_drop + + oct_dev->link_stats.fromwire.red_drops + + 
oct_dev->link_stats.fromwire.fw_err_pko + + oct_dev->link_stats.fromwire.fw_err_link + + oct_dev->link_stats.fromwire.fw_err_drop); /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ - data[i++] = CVM_CAST64(netstats->tx_dropped); + data[i++] = CVM_CAST64(netstats->tx_dropped + + oct_dev->link_stats.fromhost.max_collision_fail + + oct_dev->link_stats.fromhost.max_deferral_fail + + oct_dev->link_stats.fromhost.total_collisions + + oct_dev->link_stats.fromhost.fw_err_pko + + oct_dev->link_stats.fromhost.fw_err_link + + oct_dev->link_stats.fromhost.fw_err_drop + + oct_dev->link_stats.fromhost.fw_err_pki); /* firmware tx stats */ /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. @@ -1798,6 +1809,7 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, rstats->jabber_err = rsp_rstats->jabber_err; rstats->l2_err = rsp_rstats->l2_err; rstats->frame_err = rsp_rstats->frame_err; + rstats->red_drops = rsp_rstats->red_drops; /* RX firmware stats */ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 603a144d3d9c..44c2a0c1bbdd 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2585,6 +2585,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", __func__); + stats->tx_dmamap_fail++; return NETDEV_TX_BUSY; } @@ -2624,6 +2625,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", __func__); + stats->tx_dmamap_fail++; return NETDEV_TX_BUSY; } add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 75eea83c7cc6..34a94daca590 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -779,18 +779,24 @@ struct liquidio_if_cfg_info { /** Stats for each NIC port in RX direction. 
*/ struct nic_rx_stats { /* link-level stats */ - u64 total_rcvd; - u64 bytes_rcvd; - u64 total_bcst; - u64 total_mcst; - u64 runts; - u64 ctl_rcvd; - u64 fifo_err; /* Accounts for over/under-run of buffers */ - u64 dmac_drop; - u64 fcs_err; - u64 jabber_err; - u64 l2_err; - u64 frame_err; + u64 total_rcvd; /* Received packets */ + u64 bytes_rcvd; /* Octets of received packets */ + u64 total_bcst; /* Number of non-dropped L2 broadcast packets */ + u64 total_mcst; /* Number of non-dropped L2 multicast packets */ + u64 runts; /* Packets shorter than allowed */ + u64 ctl_rcvd; /* Received PAUSE packets */ + u64 fifo_err; /* Packets dropped due to RX FIFO full */ + u64 dmac_drop; /* Packets dropped by the DMAC filter */ + u64 fcs_err; /* Sum of fragment, overrun, and FCS errors */ + u64 jabber_err; /* Packets larger than allowed */ + u64 l2_err; /* Sum of DMA, parity, PCAM access, no memory, + * buffer overflow, malformed L2 header or + * length, oversize errors + **/ + u64 frame_err; /* Sum of IPv4 and L4 checksum errors */ + u64 red_drops; /* Packets dropped by RED due to buffer + * exhaustion + **/ /* firmware stats */ u64 fw_total_rcvd; @@ -806,11 +812,11 @@ struct nic_rx_stats { u64 fw_lro_pkts; /* Number of packets that are LROed */ u64 fw_lro_octs; /* Number of octets that are LROed */ u64 fw_total_lro; /* Number of LRO packets formed */ - u64 fw_lro_aborts; /* Number of times lRO of packet aborted */ + u64 fw_lro_aborts; /* Number of times LRO of packet aborted */ u64 fw_lro_aborts_port; u64 fw_lro_aborts_seq; u64 fw_lro_aborts_tsval; - u64 fw_lro_aborts_timer; + u64 fw_lro_aborts_timer; /* Timer setting error */ /* intrmod: packet forward rate */ u64 fwd_rate; }; @@ -818,18 +824,35 @@ struct nic_rx_stats { /** Stats for each NIC port in RX direction. 
*/ struct nic_tx_stats { /* link-level stats */ - u64 total_pkts_sent; - u64 total_bytes_sent; - u64 mcast_pkts_sent; - u64 bcast_pkts_sent; - u64 ctl_sent; - u64 one_collision_sent; /* Packets sent after one collision*/ - u64 multi_collision_sent; /* Packets sent after multiple collision*/ - u64 max_collision_fail; /* Packets not sent due to max collisions */ - u64 max_deferral_fail; /* Packets not sent due to max deferrals */ - u64 fifo_err; /* Accounts for over/under-run of buffers */ - u64 runts; - u64 total_collisions; /* Total number of collisions detected */ + u64 total_pkts_sent; /* Total frames sent on the interface */ + u64 total_bytes_sent; /* Total octets sent on the interface */ + u64 mcast_pkts_sent; /* Packets sent to the multicast DMAC */ + u64 bcast_pkts_sent; /* Packets sent to a broadcast DMAC */ + u64 ctl_sent; /* Control/PAUSE packets sent */ + u64 one_collision_sent; /* Packets sent that experienced a + * single collision before successful + * transmission + **/ + u64 multi_collision_sent; /* Packets sent that experienced + * multiple collisions before successful + * transmission + **/ + u64 max_collision_fail; /* Packets dropped due to excessive + * collisions + **/ + u64 max_deferral_fail; /* Packets not sent due to max + * deferrals + **/ + u64 fifo_err; /* Packets sent that experienced a + * transmit underflow and were + * truncated + **/ + u64 runts; /* Packets sent with an octet count + * lessthan 64 + **/ + u64 total_collisions; /* Packets dropped due to excessive + * collisions + **/ /* firmware stats */ u64 fw_total_sent; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 81c987682941..5fed7b63223e 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -62,8 +62,8 @@ struct oct_iq_stats { u64 tx_tot_bytes;/**< Total count of bytes sento to network. */ u64 tx_gso; /* count of tso */ u64 tx_vxlan; /* tunnel */ - u64 tx_dmamap_fail; - u64 tx_restart; + u64 tx_dmamap_fail; /* Number of times dma mapping failed */ + u64 tx_restart; /* Number of times this queue restarted */ }; #define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats)) -- cgit v1.2.3 From 5168d73201898c581d0503434ee9743cfc8f964b Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:45:21 +0200 Subject: mlx5: basic XDP_REDIRECT forward support This implements basic XDP redirect support in the mlx5 driver. Notice that the ndo_xdp_xmit() is NOT implemented, because that API needs some changes that this patchset is working towards. The main purpose of this patch is to have different drivers doing XDP_REDIRECT to show how different memory models behave in a cross-driver world. Update(pre-RFCv2 Tariq): Need to DMA unmap page before xdp_do_redirect, as the return API does not exist yet to keep this mapped. Update(pre-RFCv3 Saeed): Don't mix XDP_TX and XDP_REDIRECT flushing, introduce xdpsq.db.redirect_flush boolean. V9: Adjust for commit 121e89275471 ("net/mlx5e: Refactor RQ XDP_TX indication") Signed-off-by: Jesper Dangaard Brouer Reviewed-by: Tariq Toukan Acked-by: Saeed Mahameed Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 27 ++++++++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 30cad07be2b5..1a05d1072c5e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -392,6 +392,7 @@ struct mlx5e_xdpsq { struct { struct mlx5e_dma_info *di; bool doorbell; + bool redirect_flush; } db; /* read only */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 176645762e49..0e24be05907f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -236,14 +236,20 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, return 0; } +static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, + struct mlx5e_dma_info *dma_info) +{ + dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq), + rq->buff.map_dir); +} + void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle) { if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info)) return; - dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq), - rq->buff.map_dir); + mlx5e_page_dma_unmap(rq, dma_info); put_page(dma_info->page); } @@ -800,9 +806,10 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, void *va, u16 *rx_headroom, u32 *len) { - const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); + struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); struct xdp_buff xdp; u32 act; + int err; if (!prog) return false; @@ -823,6 +830,15 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) trace_xdp_exception(rq->netdev, prog, act); return true; + case XDP_REDIRECT: + /* When XDP enabled then page-refcnt==1 here */ + err = xdp_do_redirect(rq->netdev, &xdp, prog); + if (!err) { + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); + rq->xdpsq.db.redirect_flush = true; + mlx5e_page_dma_unmap(rq, di); + } + return true; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: @@ -1140,6 +1156,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) xdpsq->db.doorbell = false; } + if (xdpsq->db.redirect_flush) { + xdp_do_flush_map(); + xdpsq->db.redirect_flush = false; + } + mlx5_cqwq_update_db_record(&cq->wq); /* ensure cq space is freed before enabling more cqes */ -- cgit v1.2.3 From 189ead81a83eba5f5c5ce56c45620e51abcb5cb8 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:45:32 +0200 Subject: ixgbe: use xdp_return_frame API Extend struct ixgbe_tx_buffer to store the xdp_mem_info. Notice that this could be optimized further by putting this into a union in the struct ixgbe_tx_buffer, but this patchset works towards removing this again. Thus, this is not done. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4f08c712e58e..abb5248e917e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -250,6 +250,7 @@ struct ixgbe_tx_buffer { DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); u32 tx_flags; + struct xdp_mem_info xdp_mem; }; struct ixgbe_rx_buffer { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index afadba99f7b8..0bfe6cf2bf8b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1216,7 +1216,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, /* free the skb */ if (ring_is_xdp(tx_ring)) - page_frag_free(tx_buffer->data); + xdp_return_frame(tx_buffer->data, &tx_buffer->xdp_mem); else napi_consume_skb(tx_buffer->skb, napi_budget); @@ -5797,7 +5797,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) /* Free all the Tx ring sk_buffs */ if (ring_is_xdp(tx_ring)) - page_frag_free(tx_buffer->data); + xdp_return_frame(tx_buffer->data, &tx_buffer->xdp_mem); else dev_kfree_skb_any(tx_buffer->skb); @@ -8366,6 +8366,8 @@ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, dma_unmap_len_set(tx_buffer, len, len); dma_unmap_addr_set(tx_buffer, dma, dma); tx_buffer->data = xdp->data; + tx_buffer->xdp_mem = xdp->rxq->mem; + tx_desc->read.buffer_addr = cpu_to_le64(dma); /* put descriptor type bits */ -- cgit v1.2.3 From 1ffcbc8537d0bc32aaca7000cb9c904ec4b6300f Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:45:47 +0200 Subject: tun: convert to use generic xdp_frame and xdp_return_frame API The tuntap driver invented its own driver-specific way of queuing XDP packets, by storing the xdp_buff information in the top of the XDP frame data. Convert it over to use the more generic xdp_frame structure. The main problem with the in-driver method is that the xdp_rxq_info pointer cannot be trusted/used when dequeueing the frame. V3: Remove check based on feedback from Jason Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S.
Miller --- drivers/net/tun.c | 43 ++++++++++++++++++++----------------------- drivers/vhost/net.c | 7 ++++--- include/linux/if_tun.h | 4 ++-- 3 files changed, 26 insertions(+), 28 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 28583aa0c17d..2c85e5cac2a9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -248,11 +248,11 @@ struct veth { __be16 h_vlan_TCI; }; -bool tun_is_xdp_buff(void *ptr) +bool tun_is_xdp_frame(void *ptr) { return (unsigned long)ptr & TUN_XDP_FLAG; } -EXPORT_SYMBOL(tun_is_xdp_buff); +EXPORT_SYMBOL(tun_is_xdp_frame); void *tun_xdp_to_ptr(void *ptr) { @@ -660,10 +660,10 @@ void tun_ptr_free(void *ptr) { if (!ptr) return; - if (tun_is_xdp_buff(ptr)) { - struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + if (tun_is_xdp_frame(ptr)) { + struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); - put_page(virt_to_head_page(xdp->data)); + xdp_return_frame(xdpf->data, &xdpf->mem); } else { __skb_array_destroy_skb(ptr); } @@ -1298,17 +1298,14 @@ static const struct net_device_ops tun_netdev_ops = { static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) { struct tun_struct *tun = netdev_priv(dev); - struct xdp_buff *buff = xdp->data_hard_start; - int headroom = xdp->data - xdp->data_hard_start; + struct xdp_frame *frame; struct tun_file *tfile; u32 numqueues; int ret = 0; - /* Assure headroom is available and buff is properly aligned */ - if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp))) - return -ENOSPC; - - *buff = *xdp; + frame = convert_to_xdp_frame(xdp); + if (unlikely(!frame)) + return -EOVERFLOW; rcu_read_lock(); @@ -1323,7 +1320,7 @@ static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) /* Encode the XDP flag into lowest bit for consumer to differ * XDP buffer from sk_buff. 
*/ - if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) { + if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(frame))) { this_cpu_inc(tun->pcpu_stats->tx_dropped); ret = -ENOSPC; } @@ -2001,11 +1998,11 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) static ssize_t tun_put_user_xdp(struct tun_struct *tun, struct tun_file *tfile, - struct xdp_buff *xdp, + struct xdp_frame *xdp_frame, struct iov_iter *iter) { int vnet_hdr_sz = 0; - size_t size = xdp->data_end - xdp->data; + size_t size = xdp_frame->len; struct tun_pcpu_stats *stats; size_t ret; @@ -2021,7 +2018,7 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun, iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } - ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz; + ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; stats = get_cpu_ptr(tun->pcpu_stats); u64_stats_update_begin(&stats->syncp); @@ -2189,11 +2186,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, return err; } - if (tun_is_xdp_buff(ptr)) { - struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + if (tun_is_xdp_frame(ptr)) { + struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); - ret = tun_put_user_xdp(tun, tfile, xdp, to); - put_page(virt_to_head_page(xdp->data)); + ret = tun_put_user_xdp(tun, tfile, xdpf, to); + xdp_return_frame(xdpf->data, &xdpf->mem); } else { struct sk_buff *skb = ptr; @@ -2432,10 +2429,10 @@ out_free: static int tun_ptr_peek_len(void *ptr) { if (likely(ptr)) { - if (tun_is_xdp_buff(ptr)) { - struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + if (tun_is_xdp_frame(ptr)) { + struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); - return xdp->data_end - xdp->data; + return xdpf->len; } return __skb_array_len_with_tag(ptr); } else { diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 986058a57917..bbf38befefb2 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -32,6 +32,7 @@ #include #include +#include #include "vhost.h" @@ -181,10 +182,10 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq) static int vhost_net_buf_peek_len(void *ptr) { - if (tun_is_xdp_buff(ptr)) { - struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + if (tun_is_xdp_frame(ptr)) { + struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); - return xdp->data_end - xdp->data; + return xdpf->len; } return __skb_array_len_with_tag(ptr); diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index fd00170b494f..3d2996dc7d85 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -22,7 +22,7 @@ #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); struct ptr_ring *tun_get_tx_ring(struct file *file); -bool tun_is_xdp_buff(void *ptr); +bool tun_is_xdp_frame(void *ptr); void *tun_xdp_to_ptr(void *ptr); void *tun_ptr_to_xdp(void *ptr); void tun_ptr_free(void *ptr); @@ -39,7 +39,7 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f) { return ERR_PTR(-EINVAL); } -static inline bool tun_is_xdp_buff(void *ptr) +static inline bool tun_is_xdp_frame(void *ptr) { return false; } -- cgit v1.2.3 From cac320c850efb25480cd0f71383b84ec61c0e138 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:45:52 +0200 Subject: virtio_net: convert to use generic xdp_frame and xdp_return_frame API The virtio_net driver assumes XDP frames are always released based on page refcnt (via put_page). Thus, is only queues the XDP data pointer address and uses virt_to_head_page() to retrieve struct page. 
Use the XDP return API to get away from such assumptions. Instead queue an xdp_frame, which allow us to use the xdp_return_frame API, when releasing the frame. V8: Avoid endianness issues (found by kbuild test robot) V9: Change __virtnet_xdp_xmit from bool to int return value (found by Dan Carpenter) Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 54 ++++++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 25 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7b187ec7411e..f50e1ad81ad4 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -415,38 +415,48 @@ static void virtnet_xdp_flush(struct net_device *dev) virtqueue_kick(sq->vq); } -static bool __virtnet_xdp_xmit(struct virtnet_info *vi, - struct xdp_buff *xdp) +static int __virtnet_xdp_xmit(struct virtnet_info *vi, + struct xdp_buff *xdp) { struct virtio_net_hdr_mrg_rxbuf *hdr; - unsigned int len; + struct xdp_frame *xdpf, *xdpf_sent; struct send_queue *sq; + unsigned int len; unsigned int qp; - void *xdp_sent; int err; qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); sq = &vi->sq[qp]; /* Free up any pending old buffers before queueing new ones. */ - while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) { - struct page *sent_page = virt_to_head_page(xdp_sent); + while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) + xdp_return_frame(xdpf_sent->data, &xdpf_sent->mem); - put_page(sent_page); - } + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) + return -EOVERFLOW; + + /* virtqueue want to use data area in-front of packet */ + if (unlikely(xdpf->metasize > 0)) + return -EOPNOTSUPP; - xdp->data -= vi->hdr_len; + if (unlikely(xdpf->headroom < vi->hdr_len)) + return -EOVERFLOW; + + /* Make room for virtqueue hdr (also change xdpf->headroom?) */ + xdpf->data -= vi->hdr_len; /* Zero header and leave csum up to XDP layers */ - hdr = xdp->data; + hdr = xdpf->data; memset(hdr, 0, vi->hdr_len); + xdpf->len += vi->hdr_len; - sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); + sg_init_one(sq->sg, xdpf->data, xdpf->len); - err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); if (unlikely(err)) - return false; /* Caller handle free/refcnt */ + return -ENOSPC; /* Caller handle free/refcnt */ - return true; + return 0; } static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) @@ -454,7 +464,6 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; struct bpf_prog *xdp_prog; - bool sent; /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. 
@@ -463,10 +472,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) if (!xdp_prog) return -ENXIO; - sent = __virtnet_xdp_xmit(vi, xdp); - if (!sent) - return -ENOSPC; - return 0; + return __virtnet_xdp_xmit(vi, xdp); } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) @@ -555,7 +561,6 @@ static struct sk_buff *receive_small(struct net_device *dev, struct page *page = virt_to_head_page(buf); unsigned int delta = 0; struct page *xdp_page; - bool sent; int err; len -= vi->hdr_len; @@ -606,8 +611,8 @@ static struct sk_buff *receive_small(struct net_device *dev, delta = orig_data - xdp.data; break; case XDP_TX: - sent = __virtnet_xdp_xmit(vi, &xdp); - if (unlikely(!sent)) { + err = __virtnet_xdp_xmit(vi, &xdp); + if (unlikely(err)) { trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; } @@ -690,7 +695,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct bpf_prog *xdp_prog; unsigned int truesize; unsigned int headroom = mergeable_ctx_to_headroom(ctx); - bool sent; int err; head_skb = NULL; @@ -762,8 +766,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } break; case XDP_TX: - sent = __virtnet_xdp_xmit(vi, &xdp); - if (unlikely(!sent)) { + err = __virtnet_xdp_xmit(vi, &xdp); + if (unlikely(err)) { trace_xdp_exception(vi->dev, xdp_prog, act); if (unlikely(xdp_page != page)) put_page(xdp_page); -- cgit v1.2.3 From b411ef11020d012790839a5414040283d9335386 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:46:02 +0200 Subject: i40e: convert to use generic xdp_frame and xdp_return_frame API Also convert driver i40e, which very recently got XDP_REDIRECT support in commit d9314c474d4f ("i40e: add support for XDP_REDIRECT"). V7: This patch got added in V7 of this patchset. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 20 +++++++++++++++----- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 + 2 files changed, 16 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f174c72480ab..96c54cbfb1f9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -638,7 +638,8 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) kfree(tx_buffer->raw_buf); else if (ring_is_xdp(ring)) - page_frag_free(tx_buffer->raw_buf); + xdp_return_frame(tx_buffer->xdpf->data, + &tx_buffer->xdpf->mem); else dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) @@ -841,7 +842,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* free the skb/XDP data */ if (ring_is_xdp(tx_ring)) - page_frag_free(tx_buf->raw_buf); + xdp_return_frame(tx_buf->xdpf->data, &tx_buf->xdpf->mem); else napi_consume_skb(tx_buf->skb, napi_budget); @@ -2225,6 +2226,8 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, if (!xdp_prog) goto xdp_out; + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: @@ -3481,25 +3484,32 @@ dma_error: static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) { - u32 size = xdp->data_end - xdp->data; u16 i = xdp_ring->next_to_use; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; + struct xdp_frame *xdpf; dma_addr_t dma; + u32 size; + + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) + return I40E_XDP_CONSUMED; + + size = xdpf->len; if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { xdp_ring->tx_stats.tx_busy++; return I40E_XDP_CONSUMED; } - dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE); + dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE); if (dma_mapping_error(xdp_ring->dev, dma)) return I40E_XDP_CONSUMED; tx_bi = &xdp_ring->tx_bi[i]; tx_bi->bytecount = size; tx_bi->gso_segs = 1; - tx_bi->raw_buf = xdp->data; + tx_bi->xdpf = xdpf; /* record length, and DMA address */ dma_unmap_len_set(tx_bi, len, size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 3043483ec426..857b1d743c8d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -306,6 +306,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size) struct i40e_tx_buffer { struct i40e_tx_desc *next_to_watch; union { + struct xdp_frame *xdpf; struct sk_buff *skb; void *raw_buf; }; -- cgit v1.2.3 From 84f5e3fb790547860fbbe7e0ef87104d4922dd1a Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:46:07 +0200 Subject: mlx5: register a memory model when XDP is enabled Now all the users of ndo_xdp_xmit have been converted to use xdp_return_frame. This enables a different memory model, thus activating another code path in the xdp_return_frame API. V2: Fixed issues pointed out by Tariq. Signed-off-by: Jesper Dangaard Brouer Reviewed-by: Tariq Toukan Acked-by: Saeed Mahameed Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b29c1d93f058..2dca0933dfd3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -512,6 +512,14 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->mkey_be = c->mkey_be; } + /* This must only be activate for order-0 pages */ + if (rq->xdp_prog) { + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err) + goto err_rq_wq_destroy; + } + for (i = 0; i < wq_sz; i++) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); -- cgit v1.2.3 From 8d5d88527587516bd58ff0f3810f07c38e65e2be Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:46:12 +0200 Subject: xdp: rhashtable with allocator ID to pointer mapping Use the IDA infrastructure for getting a cyclic increasing ID number that is used for keeping track of each registered allocator per RX-queue xdp_rxq_info. Instead of using the IDR infrastructure, which uses a radix tree, use a dynamic rhashtable for creating the ID-to-pointer lookup table, because this is faster. The problem being solved here is that the xdp_rxq_info pointer (stored in xdp_buff) cannot be used directly, as the guaranteed lifetime is too short. The info is needed on a (potentially) remote CPU during DMA-TX completion time. In an xdp_frame the xdp_mem_info is stored when it got converted from an xdp_buff, which is sufficient for the simple page refcnt based recycle schemes. For more advanced allocators there is a need to store a pointer to the registered allocator. Thus, there is a need to guard the lifetime or validity of the allocator pointer, which is done through this rhashtable ID map to pointer. The removal and validity of the allocator and helper struct xdp_mem_allocator is guarded by RCU. The allocator will be created by the driver, and registered with xdp_rxq_info_reg_mem_model(). It is up for debate who is responsible for freeing the allocator pointer or invoking the allocator destructor function. In any case, this must happen via RCU freeing. V4: Per req of Jason Wang - Use xdp_rxq_info_reg_mem_model() in all drivers implementing XDP_REDIRECT, even though it's not strictly necessary when allocator==NULL for type MEM_TYPE_PAGE_SHARED (given it's zero). V6: Per req of Alex Duyck - Introduce rhashtable_lookup() call in later patch V8: Address sparse should-be-static warnings (from kbuild test robot) Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S.
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9 +- drivers/net/tun.c | 6 + drivers/net/virtio_net.c | 7 + include/net/xdp.h | 14 +- net/core/xdp.c | 223 +++++++++++++++++++++++++- 5 files changed, 241 insertions(+), 18 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0bfe6cf2bf8b..f10904ec2172 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6370,7 +6370,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); int ring_node = -1; - int size; + int size, err; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; @@ -6407,6 +6407,13 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->queue_index) < 0) goto err; + err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) { + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + goto err; + } + rx_ring->xdp_prog = adapter->xdp_prog; return 0; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2c85e5cac2a9..283bde85c455 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -854,6 +854,12 @@ static int tun_attach(struct tun_struct *tun, struct file *file, tun->dev, tfile->queue_index); if (err < 0) goto out; + err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) { + xdp_rxq_info_unreg(&tfile->xdp_rxq); + goto out; + } err = 0; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f50e1ad81ad4..42d338fe9a8d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1305,6 +1305,13 @@ static int virtnet_open(struct net_device *dev) if (err < 0) return err; + err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) { + xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); + return err; + } + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); } diff --git a/include/net/xdp.h b/include/net/xdp.h index ea3773f94f65..5f67c62540aa 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -41,6 +41,7 @@ enum xdp_mem_type { struct xdp_mem_info { u32 type; /* enum xdp_mem_type, but known size type */ + u32 id; }; struct xdp_rxq_info { @@ -99,18 +100,7 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) return xdp_frame; } -static inline -void xdp_return_frame(void *data, struct xdp_mem_info *mem) -{ - if (mem->type == MEM_TYPE_PAGE_SHARED) - page_frag_free(data); - - if (mem->type == MEM_TYPE_PAGE_ORDER0) { - struct page *page = virt_to_page(data); /* Assumes order0 page*/ - - put_page(page); - } -} +void xdp_return_frame(void *data, struct xdp_mem_info *mem); int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, u32 queue_index); diff --git a/net/core/xdp.c b/net/core/xdp.c index 7e6b3545277d..8b2cb79b5de0 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -5,6 +5,9 @@ */ #include #include +#include +#include +#include #include @@ -13,6 +16,99 @@ #define REG_STATE_UNREGISTERED 0x2 #define REG_STATE_UNUSED 0x3 +static DEFINE_IDA(mem_id_pool); +static DEFINE_MUTEX(mem_id_lock); +#define MEM_ID_MAX 0xFFFE +#define MEM_ID_MIN 1 +static int mem_id_next = MEM_ID_MIN; + +static bool mem_id_init; /* false */ +static struct rhashtable *mem_id_ht; + +struct xdp_mem_allocator { + struct xdp_mem_info mem; + void *allocator; + struct rhash_head node; + struct 
rcu_head rcu; +}; + +static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed) +{ + const u32 *k = data; + const u32 key = *k; + + BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id) + != sizeof(u32)); + + /* Use cyclic increasing ID as direct hash key, see rht_bucket_index */ + return key << RHT_HASH_RESERVED_SPACE; +} + +static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg, + const void *ptr) +{ + const struct xdp_mem_allocator *xa = ptr; + u32 mem_id = *(u32 *)arg->key; + + return xa->mem.id != mem_id; +} + +static const struct rhashtable_params mem_id_rht_params = { + .nelem_hint = 64, + .head_offset = offsetof(struct xdp_mem_allocator, node), + .key_offset = offsetof(struct xdp_mem_allocator, mem.id), + .key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id), + .max_size = MEM_ID_MAX, + .min_size = 8, + .automatic_shrinking = true, + .hashfn = xdp_mem_id_hashfn, + .obj_cmpfn = xdp_mem_id_cmp, +}; + +static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) +{ + struct xdp_mem_allocator *xa; + + xa = container_of(rcu, struct xdp_mem_allocator, rcu); + + /* Allow this ID to be reused */ + ida_simple_remove(&mem_id_pool, xa->mem.id); + + /* TODO: Depending on allocator type/pointer free resources */ + + /* Poison memory */ + xa->mem.id = 0xFFFF; + xa->mem.type = 0xF0F0; + xa->allocator = (void *)0xDEAD9001; + + kfree(xa); +} + +static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) +{ + struct xdp_mem_allocator *xa; + int id = xdp_rxq->mem.id; + int err; + + if (id == 0) + return; + + mutex_lock(&mem_id_lock); + + xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params); + if (!xa) { + mutex_unlock(&mem_id_lock); + return; + } + + err = rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params); + WARN_ON(err); + + call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); + + mutex_unlock(&mem_id_lock); +} + void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq) { /* Simplify driver cleanup code paths, allow unreg "unused" */ @@ -21,8 +117,14 @@ void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq) WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG"); + __xdp_rxq_info_unreg_mem_model(xdp_rxq); + xdp_rxq->reg_state = REG_STATE_UNREGISTERED; xdp_rxq->dev = NULL; + + /* Reset mem info to defaults */ + xdp_rxq->mem.id = 0; + xdp_rxq->mem.type = 0; } EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg); @@ -72,20 +174,131 @@ bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq) } EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg); +static int __mem_id_init_hash_table(void) +{ + struct rhashtable *rht; + int ret; + + if (unlikely(mem_id_init)) + return 0; + + rht = kzalloc(sizeof(*rht), GFP_KERNEL); + if (!rht) + return -ENOMEM; + + ret = rhashtable_init(rht, &mem_id_rht_params); + if (ret < 0) { + kfree(rht); + return ret; + } + mem_id_ht = rht; + smp_mb(); /* mutex lock should provide enough pairing */ + mem_id_init = true; + + return 0; +} + +/* Allocate a cyclic ID that maps to allocator pointer. + * See: https://www.kernel.org/doc/html/latest/core-api/idr.html + * + * Caller must lock mem_id_lock. 
+ */ +static int __mem_id_cyclic_get(gfp_t gfp) +{ + int retries = 1; + int id; + +again: + id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp); + if (id < 0) { + if (id == -ENOSPC) { + /* Cyclic allocator, reset next id */ + if (retries--) { + mem_id_next = MEM_ID_MIN; + goto again; + } + } + return id; /* errno */ + } + mem_id_next = id + 1; + + return id; +} + int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, enum xdp_mem_type type, void *allocator) { + struct xdp_mem_allocator *xdp_alloc; + gfp_t gfp = GFP_KERNEL; + int id, errno, ret; + void *ptr; + + if (xdp_rxq->reg_state != REG_STATE_REGISTERED) { + WARN(1, "Missing register, driver bug"); + return -EFAULT; + } + if (type >= MEM_TYPE_MAX) return -EINVAL; xdp_rxq->mem.type = type; - if (allocator) - return -EOPNOTSUPP; + if (!allocator) + return 0; + + /* Delay init of rhashtable to save memory if feature isn't used */ + if (!mem_id_init) { + mutex_lock(&mem_id_lock); + ret = __mem_id_init_hash_table(); + mutex_unlock(&mem_id_lock); + if (ret < 0) { + WARN_ON(1); + return ret; + } + } + + xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp); + if (!xdp_alloc) + return -ENOMEM; + + mutex_lock(&mem_id_lock); + id = __mem_id_cyclic_get(gfp); + if (id < 0) { + errno = id; + goto err; + } + xdp_rxq->mem.id = id; + xdp_alloc->mem = xdp_rxq->mem; + xdp_alloc->allocator = allocator; + + /* Insert allocator into ID lookup table */ + ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node); + if (IS_ERR(ptr)) { + errno = PTR_ERR(ptr); + goto err; + } + + mutex_unlock(&mem_id_lock); - /* TODO: Allocate an ID that maps to allocator pointer - * See: https://www.kernel.org/doc/html/latest/core-api/idr.html - */ return 0; +err: + mutex_unlock(&mem_id_lock); + kfree(xdp_alloc); + return errno; } EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model); + +void xdp_return_frame(void *data, struct xdp_mem_info *mem) +{ + if (mem->type == MEM_TYPE_PAGE_SHARED) { + page_frag_free(data); + return; + } + + if (mem->type == MEM_TYPE_PAGE_ORDER0) { + struct page *page = virt_to_page(data); /* Assumes order0 page*/ + + put_page(page); + } +} +EXPORT_SYMBOL_GPL(xdp_return_frame); -- cgit v1.2.3 From ff7d6b27f894f1469dc51ccb828b7363ccd9799f Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:46:17 +0200 Subject: page_pool: refurbish version of page_pool code Need a fast page recycle mechanism for ndo_xdp_xmit API for returning pages on DMA-TX completion time, which have good cross CPU performance, given DMA-TX completion time can happen on a remote CPU. Refurbish my page_pool code, that was presented[1] at MM-summit 2016. Adapted page_pool code to not depend the page allocator and integration into struct page. The DMA mapping feature is kept, even-though it will not be activated/used in this patchset. [1] http://people.netfilter.org/hawk/presentations/MM-summit2016/generic_page_pool_mm_summit2016.pdf V2: Adjustments requested by Tariq - Changed page_pool_create return codes, don't return NULL, only ERR_PTR, as this simplifies err handling in drivers. V4: many small improvements and cleanups - Add DOC comment section, that can be used by kernel-doc - Improve fallback mode, to work better with refcnt based recycling e.g. remove a WARN as pointed out by Tariq e.g. quicker fallback if ptr_ring is empty. 
V5: Fixed SPDX license as pointed out by Alexei V6: Adjustments requested by Eric Dumazet - Adjust ____cacheline_aligned_in_smp usage/placement - Move rcu_head in struct page_pool - Free pages quicker on destroy, minimize resources delayed an RCU period - Remove code for forward/backward compat ABI interface V8: Issues found by kbuild test robot - Address sparse should be static warnings - Only compile+link when a driver use/select page_pool, mlx5 selects CONFIG_PAGE_POOL, although its first used in two patches Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1 + include/net/page_pool.h | 129 ++++++++++ net/Kconfig | 3 + net/core/Makefile | 1 + net/core/page_pool.c | 317 ++++++++++++++++++++++++ 5 files changed, 451 insertions(+) create mode 100644 include/net/page_pool.h create mode 100644 net/core/page_pool.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index c032319f1cb9..12257034131e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -30,6 +30,7 @@ config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE depends on IPV6=y || IPV6=n || MLX5_CORE=m + select PAGE_POOL default n ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. diff --git a/include/net/page_pool.h b/include/net/page_pool.h new file mode 100644 index 000000000000..1fe77db59518 --- /dev/null +++ b/include/net/page_pool.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * page_pool.h + * Author: Jesper Dangaard Brouer + * Copyright (C) 2016 Red Hat, Inc. + */ + +/** + * DOC: page_pool allocator + * + * This page_pool allocator is optimized for the XDP mode that + * uses one-frame-per-page, but have fallbacks that act like the + * regular page allocator APIs. + * + * Basic use involve replacing alloc_pages() calls with the + * page_pool_alloc_pages() call. Drivers should likely use + * page_pool_dev_alloc_pages() replacing dev_alloc_pages(). + * + * If page_pool handles DMA mapping (use page->private), then API user + * is responsible for invoking page_pool_put_page() once. In-case of + * elevated refcnt, the DMA state is released, assuming other users of + * the page will eventually call put_page(). + * + * If no DMA mapping is done, then it can act as shim-layer that + * fall-through to alloc_page. As no state is kept on the page, the + * regular put_page() call is sufficient. + */ +#ifndef _NET_PAGE_POOL_H +#define _NET_PAGE_POOL_H + +#include /* Needed by ptr_ring */ +#include +#include + +#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */ +#define PP_FLAG_ALL PP_FLAG_DMA_MAP + +/* + * Fast allocation side cache array/stack + * + * The cache size and refill watermark is related to the network + * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX + * ring is usually refilled and the max consumed elements will be 64, + * thus a natural max size of objects needed in the cache. + * + * Keeping room for more objects, is due to XDP_DROP use-case. As + * XDP_DROP allows the opportunity to recycle objects directly into + * this array, as it shares the same softirq/NAPI protection. If + * cache is already full (or partly full) then the XDP_DROP recycles + * would have to take a slower code path. 
+ */ +#define PP_ALLOC_CACHE_SIZE 128 +#define PP_ALLOC_CACHE_REFILL 64 +struct pp_alloc_cache { + u32 count; + void *cache[PP_ALLOC_CACHE_SIZE]; +}; + +struct page_pool_params { + unsigned int flags; + unsigned int order; + unsigned int pool_size; + int nid; /* Numa node id to allocate from pages from */ + struct device *dev; /* device, for DMA pre-mapping purposes */ + enum dma_data_direction dma_dir; /* DMA mapping direction */ +}; + +struct page_pool { + struct rcu_head rcu; + struct page_pool_params p; + + /* + * Data structure for allocation side + * + * Drivers allocation side usually already perform some kind + * of resource protection. Piggyback on this protection, and + * require driver to protect allocation side. + * + * For NIC drivers this means, allocate a page_pool per + * RX-queue. As the RX-queue is already protected by + * Softirq/BH scheduling and napi_schedule. NAPI schedule + * guarantee that a single napi_struct will only be scheduled + * on a single CPU (see napi_schedule). + */ + struct pp_alloc_cache alloc ____cacheline_aligned_in_smp; + + /* Data structure for storing recycled pages. + * + * Returning/freeing pages is more complicated synchronization + * wise, because free's can happen on remote CPUs, with no + * association with allocation resource. + * + * Use ptr_ring, as it separates consumer and producer + * effeciently, it a way that doesn't bounce cache-lines. + * + * TODO: Implement bulk return pages into this structure. + */ + struct ptr_ring ring; +}; + +struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); + +static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) +{ + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + return page_pool_alloc_pages(pool, gfp); +} + +struct page_pool *page_pool_create(const struct page_pool_params *params); + +void page_pool_destroy(struct page_pool *pool); + +/* Never call this directly, use helpers below */ +void __page_pool_put_page(struct page_pool *pool, + struct page *page, bool allow_direct); + +static inline void page_pool_put_page(struct page_pool *pool, struct page *page) +{ + __page_pool_put_page(pool, page, false); +} +/* Very limited use-cases allow recycle direct */ +static inline void page_pool_recycle_direct(struct page_pool *pool, + struct page *page) +{ + __page_pool_put_page(pool, page, true); +} + +#endif /* _NET_PAGE_POOL_H */ diff --git a/net/Kconfig b/net/Kconfig index 0428f12c25c2..6fa1a4493b8c 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -423,6 +423,9 @@ config MAY_USE_DEVLINK on MAY_USE_DEVLINK to ensure they do not cause link errors when devlink is a loadable module and the driver using it is built-in. +config PAGE_POOL + bool + endif # if NET # Used by archs to tell that they support BPF JIT compiler plus which flavour. diff --git a/net/core/Makefile b/net/core/Makefile index 6dbbba8c57ae..7080417f8bc8 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -14,6 +14,7 @@ obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ fib_notifier.o xdp.o obj-y += net-sysfs.o +obj-$(CONFIG_PAGE_POOL) += page_pool.o obj-$(CONFIG_PROC_FS) += net-procfs.o obj-$(CONFIG_NET_PKTGEN) += pktgen.o obj-$(CONFIG_NETPOLL) += netpoll.o diff --git a/net/core/page_pool.c b/net/core/page_pool.c new file mode 100644 index 000000000000..68bf07206744 --- /dev/null +++ b/net/core/page_pool.c @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * page_pool.c + * Author: Jesper Dangaard Brouer + * Copyright (C) 2016 Red Hat, Inc. 
+ */ +#include +#include +#include + +#include +#include +#include +#include +#include /* for __put_page() */ + +static int page_pool_init(struct page_pool *pool, + const struct page_pool_params *params) +{ + unsigned int ring_qsize = 1024; /* Default */ + + memcpy(&pool->p, params, sizeof(pool->p)); + + /* Validate only known flags were used */ + if (pool->p.flags & ~(PP_FLAG_ALL)) + return -EINVAL; + + if (pool->p.pool_size) + ring_qsize = pool->p.pool_size; + + /* Sanity limit mem that can be pinned down */ + if (ring_qsize > 32768) + return -E2BIG; + + /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL. + * DMA_BIDIRECTIONAL is for allowing page used for DMA sending, + * which is the XDP_TX use-case. + */ + if ((pool->p.dma_dir != DMA_FROM_DEVICE) && + (pool->p.dma_dir != DMA_BIDIRECTIONAL)) + return -EINVAL; + + if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) + return -ENOMEM; + + return 0; +} + +struct page_pool *page_pool_create(const struct page_pool_params *params) +{ + struct page_pool *pool; + int err = 0; + + pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid); + if (!pool) + return ERR_PTR(-ENOMEM); + + err = page_pool_init(pool, params); + if (err < 0) { + pr_warn("%s() gave up with errno %d\n", __func__, err); + kfree(pool); + return ERR_PTR(err); + } + return pool; +} +EXPORT_SYMBOL(page_pool_create); + +/* fast path */ +static struct page *__page_pool_get_cached(struct page_pool *pool) +{ + struct ptr_ring *r = &pool->ring; + struct page *page; + + /* Quicker fallback, avoid locks when ring is empty */ + if (__ptr_ring_empty(r)) + return NULL; + + /* Test for safe-context, caller should provide this guarantee */ + if (likely(in_serving_softirq())) { + if (likely(pool->alloc.count)) { + /* Fast-path */ + page = pool->alloc.cache[--pool->alloc.count]; + return page; + } + /* Slower-path: Alloc array empty, time to refill + * + * Open-coded bulk ptr_ring consumer. + * + * Discussion: the ring consumer lock is not really + * needed due to the softirq/NAPI protection, but + * later need the ability to reclaim pages on the + * ring. Thus, keeping the locks. + */ + spin_lock(&r->consumer_lock); + while ((page = __ptr_ring_consume(r))) { + if (pool->alloc.count == PP_ALLOC_CACHE_REFILL) + break; + pool->alloc.cache[pool->alloc.count++] = page; + } + spin_unlock(&r->consumer_lock); + return page; + } + + /* Slow-path: Get page from locked ring queue */ + page = ptr_ring_consume(&pool->ring); + return page; +} + +/* slow path */ +noinline +static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, + gfp_t _gfp) +{ + struct page *page; + gfp_t gfp = _gfp; + dma_addr_t dma; + + /* We could always set __GFP_COMP, and avoid this branch, as + * prep_new_page() can handle order-0 with __GFP_COMP. + */ + if (pool->p.order) + gfp |= __GFP_COMP; + + /* FUTURE development: + * + * Current slow-path essentially falls back to single page + * allocations, which doesn't improve performance. This code + * need bulk allocation support from the page allocator code. + */ + + /* Cache was empty, do real allocation */ + page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); + if (!page) + return NULL; + + if (!(pool->p.flags & PP_FLAG_DMA_MAP)) + goto skip_dma_map; + + /* Setup DMA mapping: use page->private for DMA-addr + * This mapping is kept for lifetime of page, until leaving pool. 
+ */ + dma = dma_map_page(pool->p.dev, page, 0, + (PAGE_SIZE << pool->p.order), + pool->p.dma_dir); + if (dma_mapping_error(pool->p.dev, dma)) { + put_page(page); + return NULL; + } + set_page_private(page, dma); /* page->private = dma; */ + +skip_dma_map: + /* When page just alloc'ed is should/must have refcnt 1. */ + return page; +} + +/* For using page_pool replace: alloc_pages() API calls, but provide + * synchronization guarantee for allocation side. + */ +struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) +{ + struct page *page; + + /* Fast-path: Get a page from cache */ + page = __page_pool_get_cached(pool); + if (page) + return page; + + /* Slow-path: cache empty, do real allocation */ + page = __page_pool_alloc_pages_slow(pool, gfp); + return page; +} +EXPORT_SYMBOL(page_pool_alloc_pages); + +/* Cleanup page_pool state from page */ +static void __page_pool_clean_page(struct page_pool *pool, + struct page *page) +{ + if (!(pool->p.flags & PP_FLAG_DMA_MAP)) + return; + + /* DMA unmap */ + dma_unmap_page(pool->p.dev, page_private(page), + PAGE_SIZE << pool->p.order, pool->p.dma_dir); + set_page_private(page, 0); +} + +/* Return a page to the page allocator, cleaning up our state */ +static void __page_pool_return_page(struct page_pool *pool, struct page *page) +{ + __page_pool_clean_page(pool, page); + put_page(page); + /* An optimization would be to call __free_pages(page, pool->p.order) + * knowing page is not part of page-cache (thus avoiding a + * __page_cache_release() call). + */ +} + +static bool __page_pool_recycle_into_ring(struct page_pool *pool, + struct page *page) +{ + int ret; + /* BH protection not needed if current is serving softirq */ + if (in_serving_softirq()) + ret = ptr_ring_produce(&pool->ring, page); + else + ret = ptr_ring_produce_bh(&pool->ring, page); + + return (ret == 0) ? true : false; +} + +/* Only allow direct recycling in special circumstances, into the + * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case. + * + * Caller must provide appropriate safe context. + */ +static bool __page_pool_recycle_direct(struct page *page, + struct page_pool *pool) +{ + if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) + return false; + + /* Caller MUST have verified/know (page_ref_count(page) == 1) */ + pool->alloc.cache[pool->alloc.count++] = page; + return true; +} + +void __page_pool_put_page(struct page_pool *pool, + struct page *page, bool allow_direct) +{ + /* This allocator is optimized for the XDP mode that uses + * one-frame-per-page, but have fallbacks that act like the + * regular page allocator APIs. + * + * refcnt == 1 means page_pool owns page, and can recycle it. + */ + if (likely(page_ref_count(page) == 1)) { + /* Read barrier done in page_ref_count / READ_ONCE */ + + if (allow_direct && in_serving_softirq()) + if (__page_pool_recycle_direct(page, pool)) + return; + + if (!__page_pool_recycle_into_ring(pool, page)) { + /* Cache full, fallback to free pages */ + __page_pool_return_page(pool, page); + } + return; + } + /* Fallback/non-XDP mode: API user have elevated refcnt. + * + * Many drivers split up the page into fragments, and some + * want to keep doing this to save memory and do refcnt based + * recycling. Support this use case too, to ease drivers + * switching between XDP/non-XDP. + * + * In-case page_pool maintains the DMA mapping, API user must + * call page_pool_put_page once. 
In this elevated refcnt + * case, the DMA is unmapped/released, as driver is likely + * doing refcnt based recycle tricks, meaning another process + * will be invoking put_page. + */ + __page_pool_clean_page(pool, page); + put_page(page); +} +EXPORT_SYMBOL(__page_pool_put_page); + +static void __page_pool_empty_ring(struct page_pool *pool) +{ + struct page *page; + + /* Empty recycle ring */ + while ((page = ptr_ring_consume(&pool->ring))) { + /* Verify the refcnt invariant of cached pages */ + if (!(page_ref_count(page) == 1)) + pr_crit("%s() page_pool refcnt %d violation\n", + __func__, page_ref_count(page)); + + __page_pool_return_page(pool, page); + } +} + +static void __page_pool_destroy_rcu(struct rcu_head *rcu) +{ + struct page_pool *pool; + + pool = container_of(rcu, struct page_pool, rcu); + + WARN(pool->alloc.count, "API usage violation"); + + __page_pool_empty_ring(pool); + ptr_ring_cleanup(&pool->ring, NULL); + kfree(pool); +} + +/* Cleanup and release resources */ +void page_pool_destroy(struct page_pool *pool) +{ + struct page *page; + + /* Empty alloc cache, assume caller made sure this is + * no-longer in use, and page_pool_alloc_pages() cannot be + * call concurrently. + */ + while (pool->alloc.count) { + page = pool->alloc.cache[--pool->alloc.count]; + __page_pool_return_page(pool, page); + } + + /* No more consumers should exist, but producers could still + * be in-flight. + */ + __page_pool_empty_ring(pool); + + /* An xdp_mem_allocator can still ref page_pool pointer */ + call_rcu(&pool->rcu, __page_pool_destroy_rcu); +} +EXPORT_SYMBOL(page_pool_destroy); -- cgit v1.2.3 From 60bbf7eeef10dc647430646d7fe5e3d8d132dbec Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:46:27 +0200 Subject: mlx5: use page_pool for xdp_return_frame call This patch shows how it is possible to have both the driver local page cache, which uses elevated refcnt for "catching"/avoiding SKB put_page returns the page through the page allocator. And at the same time, have pages getting returned to the page_pool from ndp_xdp_xmit DMA completion. The performance improvement for XDP_REDIRECT in this patch is really good. Especially considering that (currently) the xdp_return_frame API and page_pool_put_page() does per frame operations of both rhashtable ID-lookup and locked return into (page_pool) ptr_ring. (It is the plan to remove these per frame operation in a followup patchset). The benchmark performed was RX on mlx5 and XDP_REDIRECT out ixgbe, with xdp_redirect_map (using devmap) . And the target/maximum capability of ixgbe is 13Mpps (on this HW setup). Before this patch for mlx5, XDP redirected frames were returned via the page allocator. The single flow performance was 6Mpps, and if I started two flows the collective performance drop to 4Mpps, because we hit the page allocator lock (further negative scaling occurs). Two test scenarios need to be covered, for xdp_return_frame API, which is DMA-TX completion running on same-CPU or cross-CPU free/return. Results were same-CPU=10Mpps, and cross-CPU=12Mpps. This is very close to our 13Mpps max target. The reason max target isn't reached in cross-CPU test, is likely due to RX-ring DMA unmap/map overhead (which doesn't occur in ixgbe to ixgbe testing). It is also planned to remove this unnecessary DMA unmap in a later patchset V2: Adjustments requested by Tariq - Changed page_pool_create return codes not return NULL, only ERR_PTR, as this simplifies err handling in drivers. 
- Save a branch in mlx5e_page_release - Correct page_pool size calc for MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ V5: Updated patch desc V8: Adjust for b0cedc844c00 ("net/mlx5e: Remove rq_headroom field from params") V9: - Adjust for 121e89275471 ("net/mlx5e: Refactor RQ XDP_TX indication") - Adjust for 73281b78a37a ("net/mlx5e: Derive Striding RQ size from MTU") - Correct handling if page_pool_create fail for MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ V10: Req from Tariq - Change pool_size calc for MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ Signed-off-by: Jesper Dangaard Brouer Reviewed-by: Tariq Toukan Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 ++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 41 +++++++++++++++++++---- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 16 ++++++--- 3 files changed, 48 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 1a05d1072c5e..3317a4da87cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -53,6 +53,8 @@ #include "mlx5_core.h" #include "en_stats.h" +struct page_pool; + #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) @@ -534,6 +536,7 @@ struct mlx5e_rq { unsigned int hw_mtu; struct mlx5e_xdpsq xdpsq; DECLARE_BITMAP(flags, 8); + struct page_pool *page_pool; /* control */ struct mlx5_wq_ctrl wq_ctrl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2dca0933dfd3..f10037419978 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "eswitch.h" #include "en.h" #include "en_tc.h" @@ -389,10 +390,11 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_rq_param *rqp, struct mlx5e_rq *rq) { + struct page_pool_params pp_params = { 0 }; struct mlx5_core_dev *mdev = c->mdev; void *rqc = rqp->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); - u32 byte_count; + u32 byte_count, pool_size; int npages; int wq_sz; int err; @@ -432,9 +434,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params); + pool_size = 1 << params->log_rq_mtu_frames; switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + + pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params); rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; @@ -512,13 +517,31 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->mkey_be = c->mkey_be; } - /* This must only be activate for order-0 pages */ - if (rq->xdp_prog) { - err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, - MEM_TYPE_PAGE_ORDER0, NULL); - if (err) - goto err_rq_wq_destroy; + /* Create a page_pool and register it with rxq */ + pp_params.order = rq->buff.page_order; + pp_params.flags = 0; /* No-internal DMA mapping in page_pool */ + pp_params.pool_size = pool_size; + pp_params.nid = cpu_to_node(c->cpu); + pp_params.dev = c->pdev; + pp_params.dma_dir = rq->buff.map_dir; + + /* page_pool can be used even when there is no rq->xdp_prog, + * given page_pool does not handle DMA mapping there is no + * required state to clear. 
And page_pool gracefully handle + * elevated refcnt. + */ + rq->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rq->page_pool)) { + if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + kfree(rq->wqe.frag_info); + err = PTR_ERR(rq->page_pool); + rq->page_pool = NULL; + goto err_rq_wq_destroy; } + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, + MEM_TYPE_PAGE_POOL, rq->page_pool); + if (err) + goto err_rq_wq_destroy; for (i = 0; i < wq_sz; i++) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); @@ -556,6 +579,8 @@ err_rq_wq_destroy: if (rq->xdp_prog) bpf_prog_put(rq->xdp_prog); xdp_rxq_info_unreg(&rq->xdp_rxq); + if (rq->page_pool) + page_pool_destroy(rq->page_pool); mlx5_wq_destroy(&rq->wq_ctrl); return err; @@ -569,6 +594,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) bpf_prog_put(rq->xdp_prog); xdp_rxq_info_unreg(&rq->xdp_rxq); + if (rq->page_pool) + page_pool_destroy(rq->page_pool); switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0e24be05907f..f42436d7f2d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "en.h" #include "en_tc.h" #include "eswitch.h" @@ -221,7 +222,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, if (mlx5e_rx_cache_get(rq, dma_info)) return 0; - dma_info->page = dev_alloc_pages(rq->buff.page_order); + dma_info->page = page_pool_dev_alloc_pages(rq->page_pool); if (unlikely(!dma_info->page)) return -ENOMEM; @@ -246,11 +247,16 @@ static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle) { - if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info)) - return; + if (likely(recycle)) { + if (mlx5e_rx_cache_put(rq, dma_info)) + return; - mlx5e_page_dma_unmap(rq, dma_info); - put_page(dma_info->page); + mlx5e_page_dma_unmap(rq, dma_info); + page_pool_recycle_direct(rq->page_pool, dma_info->page); + } else { + mlx5e_page_dma_unmap(rq, dma_info); + put_page(dma_info->page); + } } static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq, -- cgit v1.2.3 From 039930945a72d9af5ff04ae9b9e60658a52e0770 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:46:32 +0200 Subject: xdp: transition into using xdp_frame for return API Changing API xdp_return_frame() to take struct xdp_frame as argument, seems like a natural choice. But there are some subtle performance details here that needs extra care, which is a deliberate choice. When de-referencing xdp_frame on a remote CPU during DMA-TX completion, result in the cache-line is change to "Shared" state. Later when the page is reused for RX, then this xdp_frame cache-line is written, which change the state to "Modified". This situation already happens (naturally) for, virtio_net, tun and cpumap as the xdp_frame pointer is the queued object. In tun and cpumap, the ptr_ring is used for efficiently transferring cache-lines (with pointers) between CPUs. Thus, the only option is to de-referencing xdp_frame. It is only the ixgbe driver that had an optimization, in which it can avoid doing the de-reference of xdp_frame. The driver already have TX-ring queue, which (in case of remote DMA-TX completion) have to be transferred between CPUs anyhow. 
In this data area, we stored a struct xdp_mem_info and a data pointer, which allowed us to avoid de-referencing xdp_frame. To compensate for this, a prefetchw is used for telling the cache coherency protocol about our access pattern. My benchmarks show that this prefetchw is enough to compensate the ixgbe driver. V7: Adjust for commit d9314c474d4f ("i40e: add support for XDP_REDIRECT") V8: Adjust for commit bd658dda4237 ("net/mlx5e: Separate dma base address and offset in dma_sync call") Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 5 ++--- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 4 +--- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 17 +++++++++++------ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 1 + drivers/net/tun.c | 4 ++-- drivers/net/virtio_net.c | 2 +- include/net/xdp.h | 2 +- kernel/bpf/cpumap.c | 6 +++--- net/core/xdp.c | 4 +++- 9 files changed, 25 insertions(+), 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 96c54cbfb1f9..c8bf4d35fdea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -638,8 +638,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) kfree(tx_buffer->raw_buf); else if (ring_is_xdp(ring)) - xdp_return_frame(tx_buffer->xdpf->data, - &tx_buffer->xdpf->mem); + xdp_return_frame(tx_buffer->xdpf); else dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) @@ -842,7 +841,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* free the skb/XDP data */ if (ring_is_xdp(tx_ring)) - xdp_return_frame(tx_buf->xdpf->data, &tx_buf->xdpf->mem); + xdp_return_frame(tx_buf->xdpf); else napi_consume_skb(tx_buf->skb, napi_budget); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index abb5248e917e..7dd5038cfcc4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -241,8 +241,7 @@ struct ixgbe_tx_buffer { unsigned long time_stamp; union { struct sk_buff *skb; - /* XDP uses address ptr on irq_clean */ - void *data; + struct xdp_frame *xdpf; }; unsigned int bytecount; unsigned short gso_segs; @@ -250,7 +249,6 @@ struct ixgbe_tx_buffer { DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); u32 tx_flags; - struct xdp_mem_info xdp_mem; }; struct ixgbe_rx_buffer { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f10904ec2172..4f2864165723 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1216,7 +1216,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, /* free the skb */ if (ring_is_xdp(tx_ring)) - xdp_return_frame(tx_buffer->data, &tx_buffer->xdp_mem); + xdp_return_frame(tx_buffer->xdpf); else napi_consume_skb(tx_buffer->skb, napi_budget); @@ -2386,6 +2386,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, xdp.data_hard_start = xdp.data - ixgbe_rx_offset(rx_ring); xdp.data_end = xdp.data + size; + prefetchw(xdp.data_hard_start); /* xdp_frame write */ skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); } @@ -5797,7 +5798,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) /* Free all the Tx ring sk_buffs */ if (ring_is_xdp(tx_ring)) - xdp_return_frame(tx_buffer->data, &tx_buffer->xdp_mem); + 
xdp_return_frame(tx_buffer->xdpf); else dev_kfree_skb_any(tx_buffer->skb); @@ -8348,16 +8349,21 @@ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; + struct xdp_frame *xdpf; u32 len, cmd_type; dma_addr_t dma; u16 i; - len = xdp->data_end - xdp->data; + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) + return -EOVERFLOW; + + len = xdpf->len; if (unlikely(!ixgbe_desc_unused(ring))) return IXGBE_XDP_CONSUMED; - dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); + dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); if (dma_mapping_error(ring->dev, dma)) return IXGBE_XDP_CONSUMED; @@ -8372,8 +8378,7 @@ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, dma_unmap_len_set(tx_buffer, len, len); dma_unmap_addr_set(tx_buffer, dma, dma); - tx_buffer->data = xdp->data; - tx_buffer->xdp_mem = xdp->rxq->mem; + tx_buffer->xdpf = xdpf; tx_desc->read.buffer_addr = cpu_to_le64(dma); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index f42436d7f2d9..7bbf0db27a01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -890,6 +890,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset, frag_size, DMA_FROM_DEVICE); + prefetchw(va); /* xdp_frame data area */ prefetch(data); wi->offset += frag_size; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 283bde85c455..bec130cdbd9d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -663,7 +663,7 @@ void tun_ptr_free(void *ptr) if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); - xdp_return_frame(xdpf->data, &xdpf->mem); + xdp_return_frame(xdpf); } else { __skb_array_destroy_skb(ptr); } @@ -2196,7 +2196,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); ret = tun_put_user_xdp(tun, tfile, xdpf, to); - xdp_return_frame(xdpf->data, &xdpf->mem); + xdp_return_frame(xdpf); } else { struct sk_buff *skb = ptr; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 42d338fe9a8d..ab3d7cbc4c49 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -430,7 +430,7 @@ static int __virtnet_xdp_xmit(struct virtnet_info *vi, /* Free up any pending old buffers before queueing new ones. 
*/ while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) - xdp_return_frame(xdpf_sent->data, &xdpf_sent->mem); + xdp_return_frame(xdpf_sent); xdpf = convert_to_xdp_frame(xdp); if (unlikely(!xdpf)) diff --git a/include/net/xdp.h b/include/net/xdp.h index d0ee437753dc..137ad5f9f40f 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -103,7 +103,7 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) return xdp_frame; } -void xdp_return_frame(void *data, struct xdp_mem_info *mem); +void xdp_return_frame(struct xdp_frame *xdpf); int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, u32 queue_index); diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index bcdc4dea5ce7..c95b04ec103e 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -219,7 +219,7 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring) while ((xdpf = ptr_ring_consume(ring))) if (WARN_ON_ONCE(xdpf)) - xdp_return_frame(xdpf->data, &xdpf->mem); + xdp_return_frame(xdpf); } static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) @@ -275,7 +275,7 @@ static int cpu_map_kthread_run(void *data) skb = cpu_map_build_skb(rcpu, xdpf); if (!skb) { - xdp_return_frame(xdpf->data, &xdpf->mem); + xdp_return_frame(xdpf); continue; } @@ -578,7 +578,7 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, err = __ptr_ring_produce(q, xdpf); if (err) { drops++; - xdp_return_frame(xdpf->data, &xdpf->mem); + xdp_return_frame(xdpf); } processed++; } diff --git a/net/core/xdp.c b/net/core/xdp.c index 33e382afbd95..0c86b53a3a63 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -308,9 +308,11 @@ err: } EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model); -void xdp_return_frame(void *data, struct xdp_mem_info *mem) +void xdp_return_frame(struct xdp_frame *xdpf) { + struct xdp_mem_info *mem = &xdpf->mem; struct xdp_mem_allocator *xa; + void *data = xdpf->data; struct page *page; switch (mem->type) { -- cgit v1.2.3 From 44fa2dbd475996ddc8f3a0e6113dee983e0ee3aa Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Apr 2018 16:46:37 +0200 Subject: xdp: transition into using xdp_frame for ndo_xdp_xmit Changing API ndo_xdp_xmit to take a struct xdp_frame instead of struct xdp_buff. This brings xdp_return_frame and ndp_xdp_xmit in sync. This builds towards changing the API further to become a bulk API, because xdp_buff is not a queue-able object while xdp_frame is. V4: Adjust for commit 59655a5b6c83 ("tuntap: XDP_TX can use native XDP") V7: Adjust for commit d9314c474d4f ("i40e: add support for XDP_REDIRECT") Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 30 +++++++++++++++------------ drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24 +++++++++++---------- drivers/net/tun.c | 19 ++++++++++------- drivers/net/virtio_net.c | 24 ++++++++++++--------- include/linux/netdevice.h | 4 ++-- net/core/filter.c | 17 +++++++++++++-- 7 files changed, 74 insertions(+), 46 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index c8bf4d35fdea..87fb27ab9c24 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2203,9 +2203,20 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, #define I40E_XDP_CONSUMED 1 #define I40E_XDP_TX 2 -static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, +static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring); +static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, + struct i40e_ring *xdp_ring) +{ + struct xdp_frame *xdpf = convert_to_xdp_frame(xdp); + + if (unlikely(!xdpf)) + return I40E_XDP_CONSUMED; + + return i40e_xmit_xdp_ring(xdpf, xdp_ring); +} + /** * i40e_run_xdp - run an XDP program * @rx_ring: Rx ring being processed @@ -2233,7 +2244,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, break; case XDP_TX: xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; - result = i40e_xmit_xdp_ring(xdp, xdp_ring); + result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); break; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); @@ -3480,21 +3491,14 @@ dma_error: * @xdp: data to transmit * @xdp_ring: XDP Tx ring **/ -static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, +static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring) { u16 i = xdp_ring->next_to_use; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; - struct xdp_frame *xdpf; + u32 size = xdpf->len; dma_addr_t dma; - u32 size; - - xdpf = convert_to_xdp_frame(xdp); - if (unlikely(!xdpf)) - return I40E_XDP_CONSUMED; - - size = xdpf->len; if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { xdp_ring->tx_stats.tx_busy++; @@ -3684,7 +3688,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * * Returns Zero if sent, else an error code **/ -int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +int i40e_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) { struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); @@ -3697,7 +3701,7 @@ int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) return -ENXIO; - err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]); + err = i40e_xmit_xdp_ring(xdpf, vsi->xdp_rings[queue_index]); if (err != I40E_XDP_TX) return -ENOSPC; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 857b1d743c8d..4bf318b8be85 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -511,7 +511,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); void i40e_detect_recover_hung(struct i40e_vsi *vsi); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); -int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp); +int i40e_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf); void 
i40e_xdp_flush(struct net_device *dev); /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4f2864165723..51e7d82a5860 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2262,7 +2262,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, #define IXGBE_XDP_TX 2 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_buff *xdp); + struct xdp_frame *xdpf); static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, @@ -2270,6 +2270,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; u32 act; rcu_read_lock(); @@ -2278,12 +2279,19 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, if (!xdp_prog) goto xdp_out; + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: break; case XDP_TX: - result = ixgbe_xmit_xdp_ring(adapter, xdp); + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) { + result = IXGBE_XDP_CONSUMED; + break; + } + result = ixgbe_xmit_xdp_ring(adapter, xdpf); break; case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); @@ -2386,7 +2394,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, xdp.data_hard_start = xdp.data - ixgbe_rx_offset(rx_ring); xdp.data_end = xdp.data + size; - prefetchw(xdp.data_hard_start); /* xdp_frame write */ skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); } @@ -8344,20 +8351,15 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, } static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_buff *xdp) + struct xdp_frame *xdpf) { struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct xdp_frame *xdpf; u32 len, cmd_type; dma_addr_t dma; u16 i; - xdpf = convert_to_xdp_frame(xdp); - if (unlikely(!xdpf)) - return -EOVERFLOW; - len = xdpf->len; if (unlikely(!ixgbe_desc_unused(ring))) @@ -10010,7 +10012,7 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } -static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; @@ -10026,7 +10028,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) if (unlikely(!ring)) return -ENXIO; - err = ixgbe_xmit_xdp_ring(adapter, xdp); + err = ixgbe_xmit_xdp_ring(adapter, xdpf); if (err != IXGBE_XDP_TX) return -ENOSPC; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bec130cdbd9d..1e58be152d5c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1301,18 +1301,13 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, }; -static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +static int tun_xdp_xmit(struct net_device *dev, struct xdp_frame *frame) { struct tun_struct *tun = netdev_priv(dev); - struct xdp_frame *frame; struct tun_file *tfile; u32 numqueues; int ret = 0; - frame = convert_to_xdp_frame(xdp); - if (unlikely(!frame)) - return -EOVERFLOW; - rcu_read_lock(); numqueues = READ_ONCE(tun->numqueues); @@ -1336,6 +1331,16 @@ out: return ret; } +static int tun_xdp_tx(struct net_device *dev, struct 
xdp_buff *xdp) +{ + struct xdp_frame *frame = convert_to_xdp_frame(xdp); + + if (unlikely(!frame)) + return -EOVERFLOW; + + return tun_xdp_xmit(dev, frame); +} + static void tun_xdp_flush(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); @@ -1683,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, case XDP_TX: get_page(alloc_frag->page); alloc_frag->offset += buflen; - if (tun_xdp_xmit(tun->dev, &xdp)) + if (tun_xdp_tx(tun->dev, &xdp)) goto err_redirect; tun_xdp_flush(tun->dev); rcu_read_unlock(); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ab3d7cbc4c49..01694e26f03e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -416,10 +416,10 @@ static void virtnet_xdp_flush(struct net_device *dev) } static int __virtnet_xdp_xmit(struct virtnet_info *vi, - struct xdp_buff *xdp) + struct xdp_frame *xdpf) { struct virtio_net_hdr_mrg_rxbuf *hdr; - struct xdp_frame *xdpf, *xdpf_sent; + struct xdp_frame *xdpf_sent; struct send_queue *sq; unsigned int len; unsigned int qp; @@ -432,10 +432,6 @@ static int __virtnet_xdp_xmit(struct virtnet_info *vi, while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) xdp_return_frame(xdpf_sent); - xdpf = convert_to_xdp_frame(xdp); - if (unlikely(!xdpf)) - return -EOVERFLOW; - /* virtqueue want to use data area in-front of packet */ if (unlikely(xdpf->metasize > 0)) return -EOPNOTSUPP; @@ -459,7 +455,7 @@ static int __virtnet_xdp_xmit(struct virtnet_info *vi, return 0; } -static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) { struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; @@ -472,7 +468,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) if (!xdp_prog) return -ENXIO; - return __virtnet_xdp_xmit(vi, xdp); + return __virtnet_xdp_xmit(vi, xdpf); } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) @@ -569,6 +565,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; + struct xdp_frame *xdpf; struct xdp_buff xdp; void *orig_data; u32 act; @@ -611,7 +608,10 @@ static struct sk_buff *receive_small(struct net_device *dev, delta = orig_data - xdp.data; break; case XDP_TX: - err = __virtnet_xdp_xmit(vi, &xdp); + xdpf = convert_to_xdp_frame(&xdp); + if (unlikely(!xdpf)) + goto err_xdp; + err = __virtnet_xdp_xmit(vi, xdpf); if (unlikely(err)) { trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; @@ -702,6 +702,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { + struct xdp_frame *xdpf; struct page *xdp_page; struct xdp_buff xdp; void *data; @@ -766,7 +767,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } break; case XDP_TX: - err = __virtnet_xdp_xmit(vi, &xdp); + xdpf = convert_to_xdp_frame(&xdp); + if (unlikely(!xdpf)) + goto err_xdp; + err = __virtnet_xdp_xmit(vi, xdpf); if (unlikely(err)) { trace_xdp_exception(vi->dev, xdp_prog, act); if (unlikely(xdp_page != page)) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cf44503ea81a..14e0777ffcfb 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1165,7 +1165,7 @@ struct dev_ifalias { * This function is used to set or query state related to XDP on the * netdevice and manage BPF 
offload. See definition of * enum bpf_netdev_command for details. - * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp); + * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_frame *xdp); * This function is used to submit a XDP packet for transmit on a * netdevice. * void (*ndo_xdp_flush)(struct net_device *dev); @@ -1356,7 +1356,7 @@ struct net_device_ops { int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); int (*ndo_xdp_xmit)(struct net_device *dev, - struct xdp_buff *xdp); + struct xdp_frame *xdp); void (*ndo_xdp_flush)(struct net_device *dev); }; diff --git a/net/core/filter.c b/net/core/filter.c index d31aff93270d..3bb0cb98a9be 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2749,13 +2749,18 @@ static int __bpf_tx_xdp(struct net_device *dev, struct xdp_buff *xdp, u32 index) { + struct xdp_frame *xdpf; int err; if (!dev->netdev_ops->ndo_xdp_xmit) { return -EOPNOTSUPP; } - err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp); + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) + return -EOVERFLOW; + + err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf); if (err) return err; dev->netdev_ops->ndo_xdp_flush(dev); @@ -2771,11 +2776,19 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, if (map->map_type == BPF_MAP_TYPE_DEVMAP) { struct net_device *dev = fwd; + struct xdp_frame *xdpf; if (!dev->netdev_ops->ndo_xdp_xmit) return -EOPNOTSUPP; - err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp); + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) + return -EOVERFLOW; + + /* TODO: move to inside map code instead, for bulk support + * err = dev_map_enqueue(dev, xdp); + */ + err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf); if (err) return err; __dev_map_insert_ctx(map, index); -- cgit v1.2.3 From 72f6d71e491e6ce269b564865b21fab0a4402dd3 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Tue, 17 Apr 2018 14:11:28 +0800 Subject: vxlan: add ttl inherit support Like tos inherit, ttl inherit should also means inherit the inner protocol's ttl values, which actually not implemented in vxlan yet. But we could not treat ttl == 0 as "use the inner TTL", because that would be used also when the "ttl" option is not specified and that would be a behavior change, and breaking real use cases. So add a different attribute IFLA_VXLAN_TTL_INHERIT when "ttl inherit" is specified with ip cmd. Reported-by: Jianlin Shi Suggested-by: Jiri Benc Signed-off-by: Hangbin Liu Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 17 ++++++++++++++--- include/net/ip_tunnels.h | 11 +++++++++++ include/net/vxlan.h | 1 + include/uapi/linux/if_link.h | 1 + 4 files changed, 27 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index fab7a4db249e..aee0e60471f1 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2085,9 +2085,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, local_ip = vxlan->cfg.saddr; dst_cache = &rdst->dst_cache; md->gbp = skb->mark; - ttl = vxlan->cfg.ttl; - if (!ttl && vxlan_addr_multicast(dst)) - ttl = 1; + if (flags & VXLAN_F_TTL_INHERIT) { + ttl = ip_tunnel_get_ttl(old_iph, skb); + } else { + ttl = vxlan->cfg.ttl; + if (!ttl && vxlan_addr_multicast(dst)) + ttl = 1; + } tos = vxlan->cfg.tos; if (tos == 1) @@ -2709,6 +2713,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, }, [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, }, [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, + [IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG }, }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], @@ -3254,6 +3259,12 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_VXLAN_TTL]) conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); + if (data[IFLA_VXLAN_TTL_INHERIT]) { + if (changelink) + return -EOPNOTSUPP; + conf->flags |= VXLAN_F_TTL_INHERIT; + } + if (data[IFLA_VXLAN_LABEL]) conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) & IPV6_FLOWLABEL_MASK; diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 540a4b4417bf..751646adc769 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -379,6 +379,17 @@ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, return 0; } +static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph, + const struct sk_buff *skb) +{ + if (skb->protocol == htons(ETH_P_IP)) + return iph->ttl; + else if (skb->protocol == htons(ETH_P_IPV6)) + return ((const struct ipv6hdr *)iph)->hop_limit; + else + return 0; +} + /* Propogate ECN bits out */ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, const struct sk_buff *skb) diff --git a/include/net/vxlan.h b/include/net/vxlan.h index ad73d8b3fcc2..b99a02ae3934 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -262,6 +262,7 @@ struct vxlan_dev { #define VXLAN_F_COLLECT_METADATA 0x2000 #define VXLAN_F_GPE 0x4000 #define VXLAN_F_IPV6_LINKLOCAL 0x8000 +#define VXLAN_F_TTL_INHERIT 0x10000 /* Flags that are used in the receive path. These flags must match in * order for a socket to be shareable diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 68699f654118..b85266420bfb 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -516,6 +516,7 @@ enum { IFLA_VXLAN_COLLECT_METADATA, IFLA_VXLAN_LABEL, IFLA_VXLAN_GPE, + IFLA_VXLAN_TTL_INHERIT, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) -- cgit v1.2.3 From a64dcddc5cdfbf1d4d0af519c30cd6c2e4828282 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Tue, 17 Apr 2018 15:17:11 +0530 Subject: cxgb4vf: display pause settings Add support to display pause settings Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 9a81b52307a9..71f13bd2b5e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1419,6 +1419,22 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev, base->duplex = DUPLEX_UNKNOWN; } + if (pi->link_cfg.fc & PAUSE_RX) { + if (pi->link_cfg.fc & PAUSE_TX) { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Pause); + } else { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Asym_Pause); + } + } else if (pi->link_cfg.fc & PAUSE_TX) { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Asym_Pause); + } + base->autoneg = pi->link_cfg.autoneg; if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG) ethtool_link_ksettings_add_link_mode(link_ksettings, -- cgit v1.2.3 From 43b059a312f35799ef7e3a49aba4f1e0128e30ca Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 17 Apr 2018 17:33:09 -0700 Subject: vrf: Move fib6_table into net_vrf A later patch removes rt6i_table from rt6_info. Save the ipv6 table for a VRF in net_vrf. fib tables can not be deleted so no reference counting or locking is required. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 0a2b180d138a..90b5f3900c22 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -48,6 +48,9 @@ static unsigned int vrf_net_id; struct net_vrf { struct rtable __rcu *rth; struct rt6_info __rcu *rt6; +#if IS_ENABLED(CONFIG_IPV6) + struct fib6_table *fib6_table; +#endif u32 tb_id; }; @@ -496,7 +499,6 @@ static int vrf_rt6_create(struct net_device *dev) int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM; struct net_vrf *vrf = netdev_priv(dev); struct net *net = dev_net(dev); - struct fib6_table *rt6i_table; struct rt6_info *rt6; int rc = -ENOMEM; @@ -504,8 +506,8 @@ static int vrf_rt6_create(struct net_device *dev) if (!ipv6_mod_enabled()) return 0; - rt6i_table = fib6_new_table(net, vrf->tb_id); - if (!rt6i_table) + vrf->fib6_table = fib6_new_table(net, vrf->tb_id); + if (!vrf->fib6_table) goto out; /* create a dst for routing packets out a VRF device */ @@ -513,7 +515,6 @@ static int vrf_rt6_create(struct net_device *dev) if (!rt6) goto out; - rt6->rt6i_table = rt6i_table; rt6->dst.output = vrf_output6; rcu_assign_pointer(vrf->rt6, rt6); @@ -946,22 +947,8 @@ static struct rt6_info *vrf_ip6_route_lookup(struct net *net, int flags) { struct net_vrf *vrf = netdev_priv(dev); - struct fib6_table *table = NULL; - struct rt6_info *rt6; - - rcu_read_lock(); - - /* fib6_table does not have a refcnt and can not be freed */ - rt6 = rcu_dereference(vrf->rt6); - if (likely(rt6)) - table = rt6->rt6i_table; - - rcu_read_unlock(); - - if (!table) - return NULL; - return ip6_pol_route(net, table, ifindex, fl6, skb, flags); + return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags); } static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, -- cgit v1.2.3 From 5e670d844b2a4e47d1b9b9aceb14dd3c12a6d4bf Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 17 Apr 2018 17:33:14 -0700 Subject: net/ipv6: Move nexthop data to fib6_nh Introduce fib6_nh 
structure and move nexthop related data from rt6_info and rt6_info.dst to fib6_nh. References to dev, gateway or lwtstate from a FIB lookup perspective are converted to use fib6_nh; datapath references to dst version are left as is. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 32 ++-- include/net/ip6_fib.h | 16 +- include/net/ip6_route.h | 6 +- net/ipv6/addrconf.c | 2 +- net/ipv6/ip6_fib.c | 6 +- net/ipv6/route.c | 162 +++++++++++---------- 6 files changed, 125 insertions(+), 99 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 1904c0323d39..d995a0b52d7c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2770,9 +2770,9 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp, struct in6_addr *gw; int ifindex, weight; - ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex; - weight = mlxsw_sp_rt6->rt->rt6i_nh_weight; - gw = &mlxsw_sp_rt6->rt->rt6i_gateway; + ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex; + weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight; + gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw; if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex, weight)) return false; @@ -2838,7 +2838,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed) struct net_device *dev; list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { - dev = mlxsw_sp_rt6->rt->dst.dev; + dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev; val ^= dev->ifindex; } @@ -3836,9 +3836,9 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp, struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; struct rt6_info *rt = mlxsw_sp_rt6->rt; - if (nh->rif && nh->rif->dev == rt->dst.dev && + if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev && ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr, - &rt->rt6i_gateway)) + &rt->fib6_nh.nh_gw)) return nh; continue; } @@ -3895,7 +3895,7 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, - list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD; + list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD; return; } @@ -3905,9 +3905,9 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6); if (nh && nh->offloaded) - mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD; + mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD; else - mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD; + mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD; } } @@ -3922,7 +3922,7 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { struct rt6_info *rt = mlxsw_sp_rt6->rt; - rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD; + rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD; } } @@ -4818,8 +4818,8 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp, const struct rt6_info *rt, enum mlxsw_sp_ipip_type *ret) { - return rt->dst.dev && - mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret); + return rt->fib6_nh.nh_dev && + mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret); } static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, @@ -4829,7 +4829,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, { const 
struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; - struct net_device *dev = rt->dst.dev; + struct net_device *dev = rt->fib6_nh.nh_dev; struct mlxsw_sp_rif *rif; int err; @@ -4872,11 +4872,11 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh, const struct rt6_info *rt) { - struct net_device *dev = rt->dst.dev; + struct net_device *dev = rt->fib6_nh.nh_dev; nh->nh_grp = nh_grp; - nh->nh_weight = rt->rt6i_nh_weight; - memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr)); + nh->nh_weight = rt->fib6_nh.nh_weight; + memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr)); mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 0165820bbafb..f0a88370ba95 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -127,6 +127,16 @@ struct rt6_exception { #define FIB6_EXCEPTION_BUCKET_SIZE (1 << FIB6_EXCEPTION_BUCKET_SIZE_SHIFT) #define FIB6_MAX_DEPTH 5 +struct fib6_nh { + struct in6_addr nh_gw; + struct net_device *nh_dev; + struct lwtunnel_state *nh_lwtstate; + + unsigned int nh_flags; + atomic_t nh_upper_bound; + int nh_weight; +}; + struct rt6_info { struct dst_entry dst; struct rt6_info __rcu *rt6_next; @@ -149,12 +159,9 @@ struct rt6_info { */ struct list_head rt6i_siblings; unsigned int rt6i_nsiblings; - atomic_t rt6i_nh_upper_bound; atomic_t rt6i_ref; - unsigned int rt6i_nh_flags; - /* These are in a separate cache line. */ struct rt6key rt6i_dst ____cacheline_aligned_in_smp; u32 rt6i_flags; @@ -171,13 +178,14 @@ struct rt6_info { u32 rt6i_metric; u32 rt6i_pmtu; /* more non-fragment space at head required */ - int rt6i_nh_weight; unsigned short rt6i_nfheader_len; u8 rt6i_protocol; u8 fib6_type; u8 exception_bucket_flushed:1, should_flush:1, unused:6; + + struct fib6_nh fib6_nh; }; #define for_each_fib6_node_rt_rcu(fn) \ diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 1130a1144dfd..655e13017a45 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -273,10 +273,10 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b) { - return a->dst.dev == b->dst.dev && + return a->fib6_nh.nh_dev == b->fib6_nh.nh_dev && a->rt6i_idev == b->rt6i_idev && - ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) && - !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate); + ipv6_addr_equal(&a->fib6_nh.nh_gw, &b->fib6_nh.nh_gw) && + !lwtunnel_cmp_encap(a->fib6_nh.nh_lwtstate, b->fib6_nh.nh_lwtstate); } #endif diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f71fcf2635d5..483c8772e856 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2369,7 +2369,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, goto out; for_each_fib6_node_rt_rcu(fn) { - if (rt->dst.dev->ifindex != dev->ifindex) + if (rt->fib6_nh.nh_dev->ifindex != dev->ifindex) continue; if ((rt->rt6i_flags & flags) != flags) continue; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 74d2a3748e2f..64b73e65f114 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -2221,6 +2221,7 @@ static int ipv6_route_seq_show(struct seq_file *seq, void *v) { struct rt6_info *rt = v; struct ipv6_route_iter *iter = seq->private; + const struct net_device *dev; seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen); @@ -2230,14 +2231,15 @@ static int 
ipv6_route_seq_show(struct seq_file *seq, void *v) seq_puts(seq, "00000000000000000000000000000000 00 "); #endif if (rt->rt6i_flags & RTF_GATEWAY) - seq_printf(seq, "%pi6", &rt->rt6i_gateway); + seq_printf(seq, "%pi6", &rt->fib6_nh.nh_gw); else seq_puts(seq, "00000000000000000000000000000000"); + dev = rt->fib6_nh.nh_dev; seq_printf(seq, " %08x %08x %08x %08x %8s\n", rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), rt->dst.__use, rt->rt6i_flags, - rt->dst.dev ? rt->dst.dev->name : ""); + dev ? dev->name : ""); iter->w.leaf = NULL; return 0; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1cc01f5bb773..222af19d3403 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -466,12 +466,15 @@ static struct rt6_info *rt6_multipath_select(const struct net *net, if (!fl6->mp_hash) fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL); - if (fl6->mp_hash <= atomic_read(&match->rt6i_nh_upper_bound)) + if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound)) return match; list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings, rt6i_siblings) { - if (fl6->mp_hash > atomic_read(&sibling->rt6i_nh_upper_bound)) + int nh_upper_bound; + + nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound); + if (fl6->mp_hash > nh_upper_bound) continue; if (rt6_score_route(sibling, oif, strict) < 0) break; @@ -495,13 +498,14 @@ static inline struct rt6_info *rt6_device_match(struct net *net, struct rt6_info *local = NULL; struct rt6_info *sprt; - if (!oif && ipv6_addr_any(saddr) && !(rt->rt6i_nh_flags & RTNH_F_DEAD)) + if (!oif && ipv6_addr_any(saddr) && + !(rt->fib6_nh.nh_flags & RTNH_F_DEAD)) return rt; for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) { - struct net_device *dev = sprt->dst.dev; + const struct net_device *dev = sprt->fib6_nh.nh_dev; - if (sprt->rt6i_nh_flags & RTNH_F_DEAD) + if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD) continue; if (oif) { @@ -533,7 +537,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net, return net->ipv6.ip6_null_entry; } - return rt->rt6i_nh_flags & RTNH_F_DEAD ? net->ipv6.ip6_null_entry : rt; + return rt->fib6_nh.nh_flags & RTNH_F_DEAD ? 
net->ipv6.ip6_null_entry : rt; } #ifdef CONFIG_IPV6_ROUTER_PREF @@ -558,7 +562,10 @@ static void rt6_probe_deferred(struct work_struct *w) static void rt6_probe(struct rt6_info *rt) { struct __rt6_probe_work *work; + const struct in6_addr *nh_gw; struct neighbour *neigh; + struct net_device *dev; + /* * Okay, this does not seem to be appropriate * for now, however, we need to check if it @@ -569,8 +576,11 @@ static void rt6_probe(struct rt6_info *rt) */ if (!rt || !(rt->rt6i_flags & RTF_GATEWAY)) return; + + nh_gw = &rt->fib6_nh.nh_gw; + dev = rt->fib6_nh.nh_dev; rcu_read_lock_bh(); - neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); + neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); if (neigh) { if (neigh->nud_state & NUD_VALID) goto out; @@ -592,9 +602,9 @@ static void rt6_probe(struct rt6_info *rt) if (work) { INIT_WORK(&work->work, rt6_probe_deferred); - work->target = rt->rt6i_gateway; - dev_hold(rt->dst.dev); - work->dev = rt->dst.dev; + work->target = *nh_gw; + dev_hold(dev); + work->dev = dev; schedule_work(&work->work); } @@ -612,7 +622,8 @@ static inline void rt6_probe(struct rt6_info *rt) */ static inline int rt6_check_dev(struct rt6_info *rt, int oif) { - struct net_device *dev = rt->dst.dev; + const struct net_device *dev = rt->fib6_nh.nh_dev; + if (!oif || dev->ifindex == oif) return 2; if ((dev->flags & IFF_LOOPBACK) && @@ -623,15 +634,16 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif) static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt) { - struct neighbour *neigh; enum rt6_nud_state ret = RT6_NUD_FAIL_HARD; + struct neighbour *neigh; if (rt->rt6i_flags & RTF_NONEXTHOP || !(rt->rt6i_flags & RTF_GATEWAY)) return RT6_NUD_SUCCEED; rcu_read_lock_bh(); - neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); + neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.nh_dev, + &rt->fib6_nh.nh_gw); if (neigh) { read_lock(&neigh->lock); if (neigh->nud_state & NUD_VALID) @@ -679,11 +691,11 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict, bool match_do_rr = false; struct inet6_dev *idev = rt->rt6i_idev; - if (rt->rt6i_nh_flags & RTNH_F_DEAD) + if (rt->fib6_nh.nh_flags & RTNH_F_DEAD) goto out; if (idev->cnf.ignore_routes_with_linkdown && - rt->rt6i_nh_flags & RTNH_F_LINKDOWN && + rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN && !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE)) goto out; @@ -888,7 +900,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, /* called with rcu_lock held */ static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt) { - struct net_device *dev = rt->dst.dev; + struct net_device *dev = rt->fib6_nh.nh_dev; if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) { /* for copies of local routes, dst->dev needs to be the @@ -928,7 +940,7 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort) if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); rt->dst.lastuse = jiffies; - rt->rt6i_gateway = ort->rt6i_gateway; + rt->rt6i_gateway = ort->fib6_nh.nh_gw; rt->rt6i_flags = ort->rt6i_flags; rt6_set_from(rt, ort); rt->rt6i_metric = ort->rt6i_metric; @@ -937,7 +949,7 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort) #endif rt->rt6i_prefsrc = ort->rt6i_prefsrc; rt->rt6i_table = ort->rt6i_table; - rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate); + rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate); } static struct fib6_node* fib6_backtrack(struct fib6_node *fn, @@ -1308,7 +1320,7 @@ __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket, 
static int rt6_insert_exception(struct rt6_info *nrt, struct rt6_info *ort) { - struct net *net = dev_net(ort->dst.dev); + struct net *net = dev_net(nrt->dst.dev); struct rt6_exception_bucket *bucket; struct in6_addr *src_key = NULL; struct rt6_exception *rt6_ex; @@ -2313,7 +2325,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net, fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); restart: for_each_fib6_node_rt_rcu(fn) { - if (rt->rt6i_nh_flags & RTNH_F_DEAD) + if (rt->fib6_nh.nh_flags & RTNH_F_DEAD) continue; if (rt6_check_expired(rt)) continue; @@ -2321,14 +2333,14 @@ restart: break; if (!(rt->rt6i_flags & RTF_GATEWAY)) continue; - if (fl6->flowi6_oif != rt->dst.dev->ifindex) + if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex) continue; /* rt_cache's gateway might be different from its 'parent' * in the case of an ip redirect. * So we keep searching in the exception table if the gateway * is different. */ - if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) { + if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.nh_gw)) { rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr); @@ -2905,7 +2917,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, &lwtstate, extack); if (err) goto out; - rt->dst.lwtstate = lwtstate_get(lwtstate); + rt->fib6_nh.nh_lwtstate = lwtstate_get(lwtstate); lwtunnel_set_redirect(&rt->dst); } @@ -2920,7 +2932,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, #endif rt->rt6i_metric = cfg->fc_metric; - rt->rt6i_nh_weight = 1; + rt->fib6_nh.nh_weight = 1; rt->fib6_type = cfg->fc_type; @@ -2975,7 +2987,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, if (err) goto out; - rt->rt6i_gateway = cfg->fc_gateway; + rt->fib6_nh.nh_gw = rt->rt6i_gateway = cfg->fc_gateway; } err = -ENODEV; @@ -3010,9 +3022,9 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, install_route: if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) && !netif_carrier_ok(dev)) - rt->rt6i_nh_flags |= RTNH_F_LINKDOWN; - rt->rt6i_nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK); - rt->dst.dev = dev; + rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN; + rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK); + rt->fib6_nh.nh_dev = rt->dst.dev = dev; rt->rt6i_idev = idev; rt->rt6i_table = table; @@ -3171,11 +3183,11 @@ static int ip6_route_del(struct fib6_config *cfg, rt = rt_cache; } if (cfg->fc_ifindex && - (!rt->dst.dev || - rt->dst.dev->ifindex != cfg->fc_ifindex)) + (!rt->fib6_nh.nh_dev || + rt->fib6_nh.nh_dev->ifindex != cfg->fc_ifindex)) continue; if (cfg->fc_flags & RTF_GATEWAY && - !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) + !ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw)) continue; if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) continue; @@ -3337,11 +3349,11 @@ static struct rt6_info *rt6_get_route_info(struct net *net, goto out; for_each_fib6_node_rt_rcu(fn) { - if (rt->dst.dev->ifindex != ifindex) + if (rt->fib6_nh.nh_dev->ifindex != ifindex) continue; if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) continue; - if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr)) + if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr)) continue; ip6_hold_safe(NULL, &rt, false); break; @@ -3398,9 +3410,9 @@ struct rt6_info *rt6_get_dflt_router(struct net *net, rcu_read_lock(); for_each_fib6_node_rt_rcu(&table->tb6_root) { - if (dev == rt->dst.dev && + if (dev == rt->fib6_nh.nh_dev && ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == 
(RTF_ADDRCONF | RTF_DEFAULT)) && - ipv6_addr_equal(&rt->rt6i_gateway, addr)) + ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr)) break; } if (rt) @@ -3627,6 +3639,8 @@ struct rt6_info *addrconf_dst_alloc(struct net *net, rt->rt6i_flags |= RTF_LOCAL; } + rt->fib6_nh.nh_gw = *addr; + rt->fib6_nh.nh_dev = dev; rt->rt6i_gateway = *addr; rt->rt6i_dst.addr = *addr; rt->rt6i_dst.plen = 128; @@ -3649,7 +3663,7 @@ static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg) struct net *net = ((struct arg_dev_net_ip *)arg)->net; struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr; - if (((void *)rt->dst.dev == dev || !dev) && + if (((void *)rt->fib6_nh.nh_dev == dev || !dev) && rt != net->ipv6.ip6_null_entry && ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) { spin_lock_bh(&rt6_exception_lock); @@ -3681,7 +3695,7 @@ static int fib6_clean_tohost(struct rt6_info *rt, void *arg) struct in6_addr *gateway = (struct in6_addr *)arg; if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) && - ipv6_addr_equal(gateway, &rt->rt6i_gateway)) { + ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) { return -1; } @@ -3729,8 +3743,8 @@ static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt) static bool rt6_is_dead(const struct rt6_info *rt) { - if (rt->rt6i_nh_flags & RTNH_F_DEAD || - (rt->rt6i_nh_flags & RTNH_F_LINKDOWN && + if (rt->fib6_nh.nh_flags & RTNH_F_DEAD || + (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN && rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) return true; @@ -3743,11 +3757,11 @@ static int rt6_multipath_total_weight(const struct rt6_info *rt) int total = 0; if (!rt6_is_dead(rt)) - total += rt->rt6i_nh_weight; + total += rt->fib6_nh.nh_weight; list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) { if (!rt6_is_dead(iter)) - total += iter->rt6i_nh_weight; + total += iter->fib6_nh.nh_weight; } return total; @@ -3758,11 +3772,11 @@ static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total) int upper_bound = -1; if (!rt6_is_dead(rt)) { - *weight += rt->rt6i_nh_weight; + *weight += rt->fib6_nh.nh_weight; upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31, total) - 1; } - atomic_set(&rt->rt6i_nh_upper_bound, upper_bound); + atomic_set(&rt->fib6_nh.nh_upper_bound, upper_bound); } static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total) @@ -3805,8 +3819,8 @@ static int fib6_ifup(struct rt6_info *rt, void *p_arg) const struct arg_netdev_event *arg = p_arg; struct net *net = dev_net(arg->dev); - if (rt != net->ipv6.ip6_null_entry && rt->dst.dev == arg->dev) { - rt->rt6i_nh_flags &= ~arg->nh_flags; + if (rt != net->ipv6.ip6_null_entry && rt->fib6_nh.nh_dev == arg->dev) { + rt->fib6_nh.nh_flags &= ~arg->nh_flags; fib6_update_sernum_upto_root(net, rt); rt6_multipath_rebalance(rt); } @@ -3834,10 +3848,10 @@ static bool rt6_multipath_uses_dev(const struct rt6_info *rt, { struct rt6_info *iter; - if (rt->dst.dev == dev) + if (rt->fib6_nh.nh_dev == dev) return true; list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) - if (iter->dst.dev == dev) + if (iter->fib6_nh.nh_dev == dev) return true; return false; @@ -3858,11 +3872,12 @@ static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt, struct rt6_info *iter; unsigned int dead = 0; - if (rt->dst.dev == down_dev || rt->rt6i_nh_flags & RTNH_F_DEAD) + if (rt->fib6_nh.nh_dev == down_dev || + rt->fib6_nh.nh_flags & RTNH_F_DEAD) dead++; list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) - if (iter->dst.dev == down_dev || - iter->rt6i_nh_flags & RTNH_F_DEAD) + if 
(iter->fib6_nh.nh_dev == down_dev || + iter->fib6_nh.nh_flags & RTNH_F_DEAD) dead++; return dead; @@ -3874,11 +3889,11 @@ static void rt6_multipath_nh_flags_set(struct rt6_info *rt, { struct rt6_info *iter; - if (rt->dst.dev == dev) - rt->rt6i_nh_flags |= nh_flags; + if (rt->fib6_nh.nh_dev == dev) + rt->fib6_nh.nh_flags |= nh_flags; list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) - if (iter->dst.dev == dev) - iter->rt6i_nh_flags |= nh_flags; + if (iter->fib6_nh.nh_dev == dev) + iter->fib6_nh.nh_flags |= nh_flags; } /* called with write lock held for table with rt */ @@ -3893,12 +3908,12 @@ static int fib6_ifdown(struct rt6_info *rt, void *p_arg) switch (arg->event) { case NETDEV_UNREGISTER: - return rt->dst.dev == dev ? -1 : 0; + return rt->fib6_nh.nh_dev == dev ? -1 : 0; case NETDEV_DOWN: if (rt->should_flush) return -1; if (!rt->rt6i_nsiblings) - return rt->dst.dev == dev ? -1 : 0; + return rt->fib6_nh.nh_dev == dev ? -1 : 0; if (rt6_multipath_uses_dev(rt, dev)) { unsigned int count; @@ -3914,10 +3929,10 @@ static int fib6_ifdown(struct rt6_info *rt, void *p_arg) } return -2; case NETDEV_CHANGE: - if (rt->dst.dev != dev || + if (rt->fib6_nh.nh_dev != dev || rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) break; - rt->rt6i_nh_flags |= RTNH_F_LINKDOWN; + rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN; rt6_multipath_rebalance(rt); break; } @@ -3969,7 +3984,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) Since RFC 1981 doesn't include administrative MTU increase update PMTU increase is a MUST. (i.e. jumbo frame) */ - if (rt->dst.dev == arg->dev && + if (rt->fib6_nh.nh_dev == arg->dev && !dst_metric_locked(&rt->dst, RTAX_MTU)) { spin_lock_bh(&rt6_exception_lock); if (dst_metric_raw(&rt->dst, RTAX_MTU) && @@ -4255,7 +4270,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, goto cleanup; } - rt->rt6i_nh_weight = rtnh->rtnh_hops + 1; + rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1; err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); if (err) { @@ -4412,7 +4427,7 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt) nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ + NLA_ALIGN(sizeof(struct rtnexthop)) + nla_total_size(16) /* RTA_GATEWAY */ - + lwtunnel_get_encap_size(rt->dst.lwtstate); + + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate); nexthop_len *= rt->rt6i_nsiblings; } @@ -4430,38 +4445,38 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt) + nla_total_size(sizeof(struct rta_cacheinfo)) + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ + nla_total_size(1) /* RTA_PREF */ - + lwtunnel_get_encap_size(rt->dst.lwtstate) + + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate) + nexthop_len; } static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, unsigned int *flags, bool skip_oif) { - if (rt->rt6i_nh_flags & RTNH_F_DEAD) + if (rt->fib6_nh.nh_flags & RTNH_F_DEAD) *flags |= RTNH_F_DEAD; - if (rt->rt6i_nh_flags & RTNH_F_LINKDOWN) { + if (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN) { *flags |= RTNH_F_LINKDOWN; if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) *flags |= RTNH_F_DEAD; } if (rt->rt6i_flags & RTF_GATEWAY) { - if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0) + if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->fib6_nh.nh_gw) < 0) goto nla_put_failure; } - *flags |= (rt->rt6i_nh_flags & RTNH_F_ONLINK); - if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD) + *flags |= (rt->fib6_nh.nh_flags & RTNH_F_ONLINK); + if (rt->fib6_nh.nh_flags & RTNH_F_OFFLOAD) *flags |= RTNH_F_OFFLOAD; /* not needed for multipath encoding b/c it has a rtnexthop 
struct */ - if (!skip_oif && rt->dst.dev && - nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) + if (!skip_oif && rt->fib6_nh.nh_dev && + nla_put_u32(skb, RTA_OIF, rt->fib6_nh.nh_dev->ifindex)) goto nla_put_failure; - if (rt->dst.lwtstate && - lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0) + if (rt->fib6_nh.nh_lwtstate && + lwtunnel_fill_encap(skb, rt->fib6_nh.nh_lwtstate) < 0) goto nla_put_failure; return 0; @@ -4473,6 +4488,7 @@ nla_put_failure: /* add multipath next hop */ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) { + const struct net_device *dev = rt->fib6_nh.nh_dev; struct rtnexthop *rtnh; unsigned int flags = 0; @@ -4480,8 +4496,8 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) if (!rtnh) goto nla_put_failure; - rtnh->rtnh_hops = rt->rt6i_nh_weight - 1; - rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0; + rtnh->rtnh_hops = rt->fib6_nh.nh_weight - 1; + rtnh->rtnh_ifindex = dev ? dev->ifindex : 0; if (rt6_nexthop_info(skb, rt, &flags, true) < 0) goto nla_put_failure; -- cgit v1.2.3 From 8d1c802b2815edc97af8a58c5045ebaf3848621a Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 17 Apr 2018 17:33:26 -0700 Subject: net/ipv6: Flip FIB entries to fib6_info Convert all code paths referencing a FIB entry from rt6_info to fib6_info. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 64 ++--- include/net/if_inet6.h | 4 +- include/net/ip6_fib.h | 42 ++-- include/net/ip6_route.h | 28 +-- include/net/netns/ipv6.h | 2 +- net/ipv6/addrconf.c | 20 +- net/ipv6/anycast.c | 4 +- net/ipv6/ip6_fib.c | 116 ++++----- net/ipv6/ndisc.c | 2 +- net/ipv6/route.c | 259 +++++++++++---------- 10 files changed, 271 insertions(+), 270 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index d995a0b52d7c..b85b15851841 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -442,7 +442,7 @@ struct mlxsw_sp_fib6_entry { struct mlxsw_sp_rt6 { struct list_head list; - struct rt6_info *rt; + struct fib6_info *rt; }; struct mlxsw_sp_lpm_tree { @@ -3834,7 +3834,7 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp, for (i = 0; i < nh_grp->count; i++) { struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; - struct rt6_info *rt = mlxsw_sp_rt6->rt; + struct fib6_info *rt = mlxsw_sp_rt6->rt; if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev && ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr, @@ -3920,7 +3920,7 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry, common); list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { - struct rt6_info *rt = mlxsw_sp_rt6->rt; + struct fib6_info *rt = mlxsw_sp_rt6->rt; rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD; } @@ -4699,7 +4699,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } -static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt) +static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt) { /* Packets with link-local destination IP arriving to the router * are trapped to the CPU, so no need to program specific routes @@ -4721,7 +4721,7 @@ static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt) return false; } -static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct 
rt6_info *rt) +static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt) { struct mlxsw_sp_rt6 *mlxsw_sp_rt6; @@ -4734,18 +4734,18 @@ static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt) * memory. */ mlxsw_sp_rt6->rt = rt; - rt6_hold(rt); + fib6_info_hold(rt); return mlxsw_sp_rt6; } #if IS_ENABLED(CONFIG_IPV6) -static void mlxsw_sp_rt6_release(struct rt6_info *rt) +static void mlxsw_sp_rt6_release(struct fib6_info *rt) { - rt6_release(rt); + fib6_info_release(rt); } #else -static void mlxsw_sp_rt6_release(struct rt6_info *rt) +static void mlxsw_sp_rt6_release(struct fib6_info *rt) { } #endif @@ -4756,13 +4756,13 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) kfree(mlxsw_sp_rt6); } -static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt) +static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt) { /* RTF_CACHE routes are ignored */ return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; } -static struct rt6_info * +static struct fib6_info * mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) { return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, @@ -4771,7 +4771,7 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct rt6_info *nrt, bool replace) + const struct fib6_info *nrt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry; @@ -4779,7 +4779,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, return NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { - struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same * virtual router. 
@@ -4802,7 +4802,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, static struct mlxsw_sp_rt6 * mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry, - const struct rt6_info *rt) + const struct fib6_info *rt) { struct mlxsw_sp_rt6 *mlxsw_sp_rt6; @@ -4815,7 +4815,7 @@ mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry, } static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp, - const struct rt6_info *rt, + const struct fib6_info *rt, enum mlxsw_sp_ipip_type *ret) { return rt->fib6_nh.nh_dev && @@ -4825,7 +4825,7 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp, struct mlxsw_sp_nexthop *nh, - const struct rt6_info *rt) + const struct fib6_info *rt) { const struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; @@ -4870,7 +4870,7 @@ static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp, struct mlxsw_sp_nexthop *nh, - const struct rt6_info *rt) + const struct fib6_info *rt) { struct net_device *dev = rt->fib6_nh.nh_dev; @@ -4897,7 +4897,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, } static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp, - const struct rt6_info *rt) + const struct fib6_info *rt) { return rt->rt6i_flags & RTF_GATEWAY || mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL); @@ -4928,7 +4928,7 @@ mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt); nh_grp->count = fib6_entry->nrt6; for (i = 0; i < nh_grp->count; i++) { - struct rt6_info *rt = mlxsw_sp_rt6->rt; + struct fib6_info *rt = mlxsw_sp_rt6->rt; nh = &nh_grp->nexthops[i]; err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt); @@ -5040,7 +5040,7 @@ err_nexthop6_group_get: static int mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib6_entry *fib6_entry, - struct rt6_info *rt) + struct fib6_info *rt) { struct mlxsw_sp_rt6 *mlxsw_sp_rt6; int err; @@ -5068,7 +5068,7 @@ err_nexthop6_group_update: static void mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib6_entry *fib6_entry, - struct rt6_info *rt) + struct fib6_info *rt) { struct mlxsw_sp_rt6 *mlxsw_sp_rt6; @@ -5084,7 +5084,7 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, - const struct rt6_info *rt) + const struct fib6_info *rt) { /* Packets hitting RTF_REJECT routes need to be discarded by the * stack. 
We can rely on their destination device not having a @@ -5118,7 +5118,7 @@ mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry) static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, - struct rt6_info *rt) + struct fib6_info *rt) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_entry *fib_entry; @@ -5168,12 +5168,12 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct rt6_info *nrt, bool replace) + const struct fib6_info *nrt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { - struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) continue; @@ -5198,7 +5198,7 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry, bool replace) { struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node; - struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry); + struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry); struct mlxsw_sp_fib6_entry *fib6_entry; fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace); @@ -5213,7 +5213,7 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry, struct mlxsw_sp_fib6_entry *last; list_for_each_entry(last, &fib_node->entry_list, common.list) { - struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last); + struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last); if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id) break; @@ -5268,7 +5268,7 @@ mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, - const struct rt6_info *rt) + const struct fib6_info *rt) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; @@ -5287,7 +5287,7 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, return NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { - struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry); if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id && rt->rt6i_metric == iter_rt->rt6i_metric && @@ -5316,7 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, - struct rt6_info *rt, bool replace) + struct fib6_info *rt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; @@ -5373,7 +5373,7 @@ err_fib6_entry_nexthop_add: } static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, - struct rt6_info *rt) + struct fib6_info *rt) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; @@ -5836,7 +5836,7 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, fen6_info = container_of(info, struct fib6_entry_notifier_info, info); fib_work->fen6_info = *fen6_info; - rt6_hold(fib_work->fen6_info.rt); + fib6_info_hold(fib_work->fen6_info.rt); break; } } diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index d4088d1a688d..d6089b2e64fe 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -64,7 +64,7 @@ struct inet6_ifaddr { struct delayed_work dad_work; 
struct inet6_dev *idev; - struct rt6_info *rt; + struct fib6_info *rt; struct hlist_node addr_lst; struct list_head if_list; @@ -144,7 +144,7 @@ struct ipv6_ac_socklist { struct ifacaddr6 { struct in6_addr aca_addr; struct inet6_dev *aca_idev; - struct rt6_info *aca_rt; + struct fib6_info *aca_rt; struct ifacaddr6 *aca_next; int aca_users; refcount_t aca_refcnt; diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 6c3d92bb3459..d41b7bd69fb3 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -75,12 +75,12 @@ struct fib6_node { #ifdef CONFIG_IPV6_SUBTREES struct fib6_node __rcu *subtree; #endif - struct rt6_info __rcu *leaf; + struct fib6_info __rcu *leaf; __u16 fn_bit; /* bit key */ __u16 fn_flags; int fn_sernum; - struct rt6_info __rcu *rr_ptr; + struct fib6_info __rcu *rr_ptr; struct rcu_head rcu; }; @@ -176,7 +176,7 @@ struct fib6_info { struct rt6_info { struct dst_entry dst; struct rt6_info __rcu *rt6_next; - struct rt6_info *from; + struct fib6_info *from; /* * Tail elements of dst_entry (__refcnt etc.) @@ -242,20 +242,20 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) return ((struct rt6_info *)dst)->rt6i_idev; } -static inline void fib6_clean_expires(struct rt6_info *f6i) +static inline void fib6_clean_expires(struct fib6_info *f6i) { f6i->rt6i_flags &= ~RTF_EXPIRES; f6i->expires = 0; } -static inline void fib6_set_expires(struct rt6_info *f6i, +static inline void fib6_set_expires(struct fib6_info *f6i, unsigned long expires) { f6i->expires = expires; f6i->rt6i_flags |= RTF_EXPIRES; } -static inline bool fib6_check_expired(const struct rt6_info *f6i) +static inline bool fib6_check_expired(const struct fib6_info *f6i) { if (f6i->rt6i_flags & RTF_EXPIRES) return time_after(jiffies, f6i->expires); @@ -288,7 +288,7 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) * Return true if we can get cookie safely * Return false if not */ -static inline bool rt6_get_cookie_safe(const struct rt6_info *rt, +static inline bool rt6_get_cookie_safe(const struct fib6_info *rt, u32 *cookie) { struct fib6_node *fn; @@ -330,15 +330,15 @@ static inline void ip6_rt_put(struct rt6_info *rt) void rt6_free_pcpu(struct rt6_info *non_pcpu_rt); -struct rt6_info *fib6_info_alloc(gfp_t gfp_flags); -void fib6_info_destroy(struct rt6_info *f6i); +struct fib6_info *fib6_info_alloc(gfp_t gfp_flags); +void fib6_info_destroy(struct fib6_info *f6i); -static inline void fib6_info_hold(struct rt6_info *f6i) +static inline void fib6_info_hold(struct fib6_info *f6i) { atomic_inc(&f6i->rt6i_ref); } -static inline void fib6_info_release(struct rt6_info *f6i) +static inline void fib6_info_release(struct fib6_info *f6i) { if (f6i && atomic_dec_and_test(&f6i->rt6i_ref)) fib6_info_destroy(f6i); @@ -371,7 +371,7 @@ enum fib6_walk_state { struct fib6_walker { struct list_head lh; struct fib6_node *root, *node; - struct rt6_info *leaf; + struct fib6_info *leaf; enum fib6_walk_state state; unsigned int skip; unsigned int count; @@ -435,7 +435,7 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *, struct fib6_entry_notifier_info { struct fib_notifier_info info; /* must be first */ - struct rt6_info *rt; + struct fib6_info *rt; }; /* @@ -457,14 +457,14 @@ struct fib6_node *fib6_locate(struct fib6_node *root, const struct in6_addr *saddr, int src_len, bool exact_match); -void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), +void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg), void *arg); -int 
fib6_add(struct fib6_node *root, struct rt6_info *rt, +int fib6_add(struct fib6_node *root, struct fib6_info *rt, struct nl_info *info, struct netlink_ext_ack *extack); -int fib6_del(struct rt6_info *rt, struct nl_info *info); +int fib6_del(struct fib6_info *rt, struct nl_info *info); -void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, +void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, unsigned int flags); void fib6_run_gc(unsigned long expires, struct net *net, bool force); @@ -487,11 +487,11 @@ void __net_exit fib6_notifier_exit(struct net *net); unsigned int fib6_tables_seq_read(struct net *net); int fib6_tables_dump(struct net *net, struct notifier_block *nb); -void fib6_update_sernum(struct net *net, struct rt6_info *rt); -void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt); +void fib6_update_sernum(struct net *net, struct fib6_info *rt); +void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt); -void fib6_metric_set(struct rt6_info *f6i, int metric, u32 val); -static inline bool fib6_metric_locked(struct rt6_info *f6i, int metric) +void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val); +static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric) { return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric)); } diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 57d0d45667f1..d5fb1e4ae7ac 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -66,7 +66,7 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr) (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } -static inline bool rt6_qualify_for_ecmp(const struct rt6_info *rt) +static inline bool rt6_qualify_for_ecmp(const struct fib6_info *rt) { return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == RTF_GATEWAY; @@ -102,14 +102,14 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg); int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); -int ip6_ins_rt(struct net *net, struct rt6_info *rt); -int ip6_del_rt(struct net *net, struct rt6_info *rt); +int ip6_ins_rt(struct net *net, struct fib6_info *rt); +int ip6_del_rt(struct net *net, struct fib6_info *rt); -void rt6_flush_exceptions(struct rt6_info *rt); -void rt6_age_exceptions(struct rt6_info *rt, struct fib6_gc_args *gc_args, +void rt6_flush_exceptions(struct fib6_info *rt); +void rt6_age_exceptions(struct fib6_info *rt, struct fib6_gc_args *gc_args, unsigned long now); -static inline int ip6_route_get_saddr(struct net *net, struct rt6_info *rt, +static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *rt, const struct in6_addr *daddr, unsigned int prefs, struct in6_addr *saddr) @@ -136,9 +136,9 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6); void fib6_force_start_gc(struct net *net); -struct rt6_info *addrconf_dst_alloc(struct net *net, struct inet6_dev *idev, - const struct in6_addr *addr, bool anycast, - gfp_t gfp_flags); +struct fib6_info *addrconf_dst_alloc(struct net *net, struct inet6_dev *idev, + const struct in6_addr *addr, bool anycast, + gfp_t gfp_flags); struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, int flags); @@ -147,10 +147,10 @@ struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, * support functions for ND * */ -struct rt6_info *rt6_get_dflt_router(struct net *net, +struct fib6_info *rt6_get_dflt_router(struct net *net, 
const struct in6_addr *addr, struct net_device *dev); -struct rt6_info *rt6_add_dflt_router(struct net *net, +struct fib6_info *rt6_add_dflt_router(struct net *net, const struct in6_addr *gwaddr, struct net_device *dev, unsigned int pref); @@ -176,14 +176,14 @@ struct rt6_rtnl_dump_arg { struct net *net; }; -int rt6_dump_route(struct rt6_info *rt, void *p_arg); +int rt6_dump_route(struct fib6_info *rt, void *p_arg); void rt6_mtu_change(struct net_device *dev, unsigned int mtu); void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); void rt6_sync_up(struct net_device *dev, unsigned int nh_flags); void rt6_disable_ip(struct net_device *dev, unsigned long event); void rt6_sync_down_dev(struct net_device *dev, unsigned long event); -void rt6_multipath_rebalance(struct rt6_info *rt); +void rt6_multipath_rebalance(struct fib6_info *rt); void rt6_uncached_list_add(struct rt6_info *rt); void rt6_uncached_list_del(struct rt6_info *rt); @@ -271,7 +271,7 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, return daddr; } -static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b) +static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b) { return a->fib6_nh.nh_dev == b->fib6_nh.nh_dev && a->rt6i_idev == b->rt6i_idev && diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 74e4e1e449d5..97b3a54579c8 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -60,7 +60,7 @@ struct netns_ipv6 { #endif struct xt_table *ip6table_nat; #endif - struct rt6_info *fib6_null_entry; + struct fib6_info *fib6_null_entry; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e533a447f68c..a294e86a9b25 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -170,7 +170,7 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event); static int addrconf_ifdown(struct net_device *dev, int how); -static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, +static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, int plen, const struct net_device *dev, u32 flags, u32 noflags); @@ -994,7 +994,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC; struct net *net = dev_net(idev->dev); struct inet6_ifaddr *ifa = NULL; - struct rt6_info *rt = NULL; + struct fib6_info *rt = NULL; int err = 0; int addr_type = ipv6_addr_type(addr); @@ -1178,7 +1178,7 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) static void cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt) { - struct rt6_info *rt; + struct fib6_info *rt; rt = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, @@ -2348,13 +2348,13 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev, } -static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, +static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, int plen, const struct net_device *dev, u32 flags, u32 noflags) { struct fib6_node *fn; - struct rt6_info *rt = NULL; + struct fib6_info *rt = NULL; struct fib6_table *table; u32 tb_id = l3mdev_fib_table(dev) ? 
: RT6_TABLE_PREFIX; @@ -2641,7 +2641,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) */ if (pinfo->onlink) { - struct rt6_info *rt; + struct fib6_info *rt; unsigned long rt_expires; /* Avoid arithmetic overflow. Really, we could @@ -3346,7 +3346,7 @@ static int fixup_permanent_addr(struct net *net, * case regenerate the host route. */ if (!ifp->rt || !ifp->rt->rt6i_node) { - struct rt6_info *rt, *prev; + struct fib6_info *rt, *prev; rt = addrconf_dst_alloc(net, idev, &ifp->addr, false, GFP_ATOMIC); @@ -3713,7 +3713,7 @@ restart: keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6); list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { - struct rt6_info *rt = NULL; + struct fib6_info *rt = NULL; bool keep; addrconf_del_dad_work(ifa); @@ -5626,7 +5626,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) addrconf_leave_anycast(ifp); addrconf_leave_solict(ifp->idev, &ifp->addr); if (!ipv6_addr_any(&ifp->peer_addr)) { - struct rt6_info *rt; + struct fib6_info *rt; rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, ifp->idev->dev, 0, 0); @@ -5982,7 +5982,7 @@ void addrconf_disable_policy_idev(struct inet6_dev *idev, int val) list_for_each_entry(ifa, &idev->addr_list, if_list) { spin_lock(&ifa->lock); if (ifa->rt) { - struct rt6_info *rt = ifa->rt; + struct fib6_info *rt = ifa->rt; int cpu; rcu_read_lock(); diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 3db8fe10322b..0e35657501a7 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -218,7 +218,7 @@ static void aca_put(struct ifacaddr6 *ac) } } -static struct ifacaddr6 *aca_alloc(struct rt6_info *rt, +static struct ifacaddr6 *aca_alloc(struct fib6_info *rt, const struct in6_addr *addr) { struct inet6_dev *idev = rt->rt6i_idev; @@ -247,7 +247,7 @@ static struct ifacaddr6 *aca_alloc(struct rt6_info *rt, int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr) { struct ifacaddr6 *aca; - struct rt6_info *rt; + struct fib6_info *rt; struct net *net; int err; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 4d6bd033dccd..77cf43b2d858 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -43,7 +43,7 @@ static struct kmem_cache *fib6_node_kmem __read_mostly; struct fib6_cleaner { struct fib6_walker w; struct net *net; - int (*func)(struct rt6_info *, void *arg); + int (*func)(struct fib6_info *, void *arg); int sernum; void *arg; }; @@ -54,7 +54,7 @@ struct fib6_cleaner { #define FWS_INIT FWS_L #endif -static struct rt6_info *fib6_find_prefix(struct net *net, +static struct fib6_info *fib6_find_prefix(struct net *net, struct fib6_table *table, struct fib6_node *fn); static struct fib6_node *fib6_repair_tree(struct net *net, @@ -105,7 +105,7 @@ enum { FIB6_NO_SERNUM_CHANGE = 0, }; -void fib6_update_sernum(struct net *net, struct rt6_info *rt) +void fib6_update_sernum(struct net *net, struct fib6_info *rt) { struct fib6_node *fn; @@ -145,9 +145,9 @@ static __be32 addr_bit_set(const void *token, int fn_bit) addr[fn_bit >> 5]; } -struct rt6_info *fib6_info_alloc(gfp_t gfp_flags) +struct fib6_info *fib6_info_alloc(gfp_t gfp_flags) { - struct rt6_info *f6i; + struct fib6_info *f6i; f6i = kzalloc(sizeof(*f6i), gfp_flags); if (!f6i) @@ -167,7 +167,7 @@ struct rt6_info *fib6_info_alloc(gfp_t gfp_flags) return f6i; } -void fib6_info_destroy(struct rt6_info *f6i) +void fib6_info_destroy(struct fib6_info *f6i) { struct rt6_exception_bucket *bucket; struct dst_metrics *m; @@ -404,7 +404,7 @@ unsigned int fib6_tables_seq_read(struct net 
*net) static int call_fib6_entry_notifier(struct notifier_block *nb, struct net *net, enum fib_event_type event_type, - struct rt6_info *rt) + struct fib6_info *rt) { struct fib6_entry_notifier_info info = { .rt = rt, @@ -415,7 +415,7 @@ static int call_fib6_entry_notifier(struct notifier_block *nb, struct net *net, static int call_fib6_entry_notifiers(struct net *net, enum fib_event_type event_type, - struct rt6_info *rt, + struct fib6_info *rt, struct netlink_ext_ack *extack) { struct fib6_entry_notifier_info info = { @@ -432,7 +432,7 @@ struct fib6_dump_arg { struct notifier_block *nb; }; -static void fib6_rt_dump(struct rt6_info *rt, struct fib6_dump_arg *arg) +static void fib6_rt_dump(struct fib6_info *rt, struct fib6_dump_arg *arg) { if (rt == arg->net->ipv6.fib6_null_entry) return; @@ -441,7 +441,7 @@ static void fib6_rt_dump(struct rt6_info *rt, struct fib6_dump_arg *arg) static int fib6_node_dump(struct fib6_walker *w) { - struct rt6_info *rt; + struct fib6_info *rt; for_each_fib6_walker_rt(w) fib6_rt_dump(rt, w->args); @@ -490,7 +490,7 @@ int fib6_tables_dump(struct net *net, struct notifier_block *nb) static int fib6_dump_node(struct fib6_walker *w) { int res; - struct rt6_info *rt; + struct fib6_info *rt; for_each_fib6_walker_rt(w) { res = rt6_dump_route(rt, w->args); @@ -507,7 +507,7 @@ static int fib6_dump_node(struct fib6_walker *w) */ if (rt->rt6i_nsiblings) rt = list_last_entry(&rt->rt6i_siblings, - struct rt6_info, + struct fib6_info, rt6i_siblings); } w->leaf = NULL; @@ -643,7 +643,7 @@ out: return res; } -void fib6_metric_set(struct rt6_info *f6i, int metric, u32 val) +void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val) { if (!f6i) return; @@ -690,7 +690,7 @@ static struct fib6_node *fib6_add_1(struct net *net, fn = root; do { - struct rt6_info *leaf = rcu_dereference_protected(fn->leaf, + struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, lockdep_is_held(&table->tb6_lock)); key = (struct rt6key *)((u8 *)leaf + offset); @@ -884,7 +884,7 @@ insert_above: return ln; } -static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn, +static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn, struct net *net) { struct fib6_table *table = rt->rt6i_table; @@ -897,9 +897,9 @@ static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn, * to still alive ones. */ while (fn) { - struct rt6_info *leaf = rcu_dereference_protected(fn->leaf, + struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, lockdep_is_held(&table->tb6_lock)); - struct rt6_info *new_leaf; + struct fib6_info *new_leaf; if (!(fn->fn_flags & RTN_RTINFO) && leaf == rt) { new_leaf = fib6_find_prefix(net, table, fn); atomic_inc(&new_leaf->rt6i_ref); @@ -936,15 +936,15 @@ static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn, * Insert routing information in a node. 
*/ -static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, +static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, struct nl_info *info, struct netlink_ext_ack *extack) { - struct rt6_info *leaf = rcu_dereference_protected(fn->leaf, + struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, lockdep_is_held(&rt->rt6i_table->tb6_lock)); - struct rt6_info *iter = NULL; - struct rt6_info __rcu **ins; - struct rt6_info __rcu **fallback_ins = NULL; + struct fib6_info *iter = NULL; + struct fib6_info __rcu **ins; + struct fib6_info __rcu **fallback_ins = NULL; int replace = (info->nlh && (info->nlh->nlmsg_flags & NLM_F_REPLACE)); int add = (!info->nlh || @@ -1035,7 +1035,7 @@ next_iter: /* Link this route to others same route. */ if (rt->rt6i_nsiblings) { unsigned int rt6i_nsiblings; - struct rt6_info *sibling, *temp_sibling; + struct fib6_info *sibling, *temp_sibling; /* Find the first route that have the same metric */ sibling = leaf; @@ -1156,7 +1156,7 @@ add: return 0; } -static void fib6_start_gc(struct net *net, struct rt6_info *rt) +static void fib6_start_gc(struct net *net, struct fib6_info *rt) { if (!timer_pending(&net->ipv6.ip6_fib_timer) && (rt->rt6i_flags & RTF_EXPIRES)) @@ -1171,7 +1171,7 @@ void fib6_force_start_gc(struct net *net) jiffies + net->ipv6.sysctl.ip6_rt_gc_interval); } -static void __fib6_update_sernum_upto_root(struct rt6_info *rt, +static void __fib6_update_sernum_upto_root(struct fib6_info *rt, int sernum) { struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node, @@ -1186,7 +1186,7 @@ static void __fib6_update_sernum_upto_root(struct rt6_info *rt, } } -void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt) +void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt) { __fib6_update_sernum_upto_root(rt, fib6_new_sernum(net)); } @@ -1198,7 +1198,7 @@ void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt) * Need to own table->tb6_lock */ -int fib6_add(struct fib6_node *root, struct rt6_info *rt, +int fib6_add(struct fib6_node *root, struct fib6_info *rt, struct nl_info *info, struct netlink_ext_ack *extack) { struct fib6_table *table = rt->rt6i_table; @@ -1219,7 +1219,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, fn = fib6_add_1(info->nl_net, table, root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen, - offsetof(struct rt6_info, rt6i_dst), allow_create, + offsetof(struct fib6_info, rt6i_dst), allow_create, replace_required, extack); if (IS_ERR(fn)) { err = PTR_ERR(fn); @@ -1260,7 +1260,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, sn = fib6_add_1(info->nl_net, table, sfn, &rt->rt6i_src.addr, rt->rt6i_src.plen, - offsetof(struct rt6_info, rt6i_src), + offsetof(struct fib6_info, rt6i_src), allow_create, replace_required, extack); if (IS_ERR(sn)) { @@ -1279,7 +1279,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, } else { sn = fib6_add_1(info->nl_net, table, FIB6_SUBTREE(fn), &rt->rt6i_src.addr, rt->rt6i_src.plen, - offsetof(struct rt6_info, rt6i_src), + offsetof(struct fib6_info, rt6i_src), allow_create, replace_required, extack); if (IS_ERR(sn)) { @@ -1316,7 +1316,7 @@ out: * super-tree leaf node we have to find a new one for it. 
*/ if (pn != fn) { - struct rt6_info *pn_leaf = + struct fib6_info *pn_leaf = rcu_dereference_protected(pn->leaf, lockdep_is_held(&table->tb6_lock)); if (pn_leaf == rt) { @@ -1365,7 +1365,7 @@ failure: */ struct lookup_args { - int offset; /* key offset on rt6_info */ + int offset; /* key offset on fib6_info */ const struct in6_addr *addr; /* search key */ }; @@ -1403,7 +1403,7 @@ static struct fib6_node *fib6_lookup_1(struct fib6_node *root, struct fib6_node *subtree = FIB6_SUBTREE(fn); if (subtree || fn->fn_flags & RTN_RTINFO) { - struct rt6_info *leaf = rcu_dereference(fn->leaf); + struct fib6_info *leaf = rcu_dereference(fn->leaf); struct rt6key *key; if (!leaf) @@ -1443,12 +1443,12 @@ struct fib6_node *fib6_lookup(struct fib6_node *root, const struct in6_addr *dad struct fib6_node *fn; struct lookup_args args[] = { { - .offset = offsetof(struct rt6_info, rt6i_dst), + .offset = offsetof(struct fib6_info, rt6i_dst), .addr = daddr, }, #ifdef CONFIG_IPV6_SUBTREES { - .offset = offsetof(struct rt6_info, rt6i_src), + .offset = offsetof(struct fib6_info, rt6i_src), .addr = saddr, }, #endif @@ -1484,7 +1484,7 @@ static struct fib6_node *fib6_locate_1(struct fib6_node *root, struct fib6_node *fn, *prev = NULL; for (fn = root; fn ; ) { - struct rt6_info *leaf = rcu_dereference(fn->leaf); + struct fib6_info *leaf = rcu_dereference(fn->leaf); struct rt6key *key; /* This node is being deleted */ @@ -1533,7 +1533,7 @@ struct fib6_node *fib6_locate(struct fib6_node *root, struct fib6_node *fn; fn = fib6_locate_1(root, daddr, dst_len, - offsetof(struct rt6_info, rt6i_dst), + offsetof(struct fib6_info, rt6i_dst), exact_match); #ifdef CONFIG_IPV6_SUBTREES @@ -1544,7 +1544,7 @@ struct fib6_node *fib6_locate(struct fib6_node *root, if (subtree) { fn = fib6_locate_1(subtree, saddr, src_len, - offsetof(struct rt6_info, rt6i_src), + offsetof(struct fib6_info, rt6i_src), exact_match); } } @@ -1563,7 +1563,7 @@ struct fib6_node *fib6_locate(struct fib6_node *root, * */ -static struct rt6_info *fib6_find_prefix(struct net *net, +static struct fib6_info *fib6_find_prefix(struct net *net, struct fib6_table *table, struct fib6_node *fn) { @@ -1622,11 +1622,11 @@ static struct fib6_node *fib6_repair_tree(struct net *net, lockdep_is_held(&table->tb6_lock)); struct fib6_node *pn_l = rcu_dereference_protected(pn->left, lockdep_is_held(&table->tb6_lock)); - struct rt6_info *fn_leaf = rcu_dereference_protected(fn->leaf, + struct fib6_info *fn_leaf = rcu_dereference_protected(fn->leaf, lockdep_is_held(&table->tb6_lock)); - struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf, + struct fib6_info *pn_leaf = rcu_dereference_protected(pn->leaf, lockdep_is_held(&table->tb6_lock)); - struct rt6_info *new_fn_leaf; + struct fib6_info *new_fn_leaf; RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter); iter++; @@ -1717,10 +1717,10 @@ static struct fib6_node *fib6_repair_tree(struct net *net, } static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, - struct rt6_info __rcu **rtp, struct nl_info *info) + struct fib6_info __rcu **rtp, struct nl_info *info) { struct fib6_walker *w; - struct rt6_info *rt = rcu_dereference_protected(*rtp, + struct fib6_info *rt = rcu_dereference_protected(*rtp, lockdep_is_held(&table->tb6_lock)); struct net *net = info->nl_net; @@ -1741,7 +1741,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, /* Remove this entry from other siblings */ if (rt->rt6i_nsiblings) { - struct rt6_info *sibling, *next_sibling; + struct fib6_info *sibling, 
*next_sibling; list_for_each_entry_safe(sibling, next_sibling, &rt->rt6i_siblings, rt6i_siblings) @@ -1785,14 +1785,14 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, } /* Need to own table->tb6_lock */ -int fib6_del(struct rt6_info *rt, struct nl_info *info) +int fib6_del(struct fib6_info *rt, struct nl_info *info) { struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node, lockdep_is_held(&rt->rt6i_table->tb6_lock)); struct fib6_table *table = rt->rt6i_table; struct net *net = info->nl_net; - struct rt6_info __rcu **rtp; - struct rt6_info __rcu **rtp_next; + struct fib6_info __rcu **rtp; + struct fib6_info __rcu **rtp_next; if (!fn || rt == net->ipv6.fib6_null_entry) return -ENOENT; @@ -1804,7 +1804,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info) */ for (rtp = &fn->leaf; *rtp; rtp = rtp_next) { - struct rt6_info *cur = rcu_dereference_protected(*rtp, + struct fib6_info *cur = rcu_dereference_protected(*rtp, lockdep_is_held(&table->tb6_lock)); if (rt == cur) { fib6_del_route(table, fn, rtp, info); @@ -1948,7 +1948,7 @@ static int fib6_walk(struct net *net, struct fib6_walker *w) static int fib6_clean_node(struct fib6_walker *w) { int res; - struct rt6_info *rt; + struct fib6_info *rt; struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w); struct nl_info info = { .nl_net = c->net, @@ -1983,7 +1983,7 @@ static int fib6_clean_node(struct fib6_walker *w) if (WARN_ON(!rt->rt6i_nsiblings)) continue; rt = list_last_entry(&rt->rt6i_siblings, - struct rt6_info, rt6i_siblings); + struct fib6_info, rt6i_siblings); continue; } WARN_ON(res != 0); @@ -2002,7 +2002,7 @@ static int fib6_clean_node(struct fib6_walker *w) */ static void fib6_clean_tree(struct net *net, struct fib6_node *root, - int (*func)(struct rt6_info *, void *arg), + int (*func)(struct fib6_info *, void *arg), int sernum, void *arg) { struct fib6_cleaner c; @@ -2020,7 +2020,7 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root, } static void __fib6_clean_all(struct net *net, - int (*func)(struct rt6_info *, void *), + int (*func)(struct fib6_info *, void *), int sernum, void *arg) { struct fib6_table *table; @@ -2040,7 +2040,7 @@ static void __fib6_clean_all(struct net *net, rcu_read_unlock(); } -void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *), +void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *), void *arg) { __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg); @@ -2057,7 +2057,7 @@ static void fib6_flush_trees(struct net *net) * Garbage collection */ -static int fib6_age(struct rt6_info *rt, void *arg) +static int fib6_age(struct fib6_info *rt, void *arg) { struct fib6_gc_args *gc_args = arg; unsigned long now = jiffies; @@ -2261,7 +2261,7 @@ struct ipv6_route_iter { static int ipv6_route_seq_show(struct seq_file *seq, void *v) { - struct rt6_info *rt = v; + struct fib6_info *rt = v; struct ipv6_route_iter *iter = seq->private; const struct net_device *dev; @@ -2353,14 +2353,14 @@ static void ipv6_route_check_sernum(struct ipv6_route_iter *iter) static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) { int r; - struct rt6_info *n; + struct fib6_info *n; struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private; if (!v) goto iter_table; - n = rcu_dereference_bh(((struct rt6_info *)v)->rt6_next); + n = rcu_dereference_bh(((struct fib6_info *)v)->rt6_next); if (n) { ++*pos; return n; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 
a28857088bff..102645298692 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1155,7 +1155,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb); struct neighbour *neigh = NULL; struct inet6_dev *in6_dev; - struct rt6_info *rt = NULL; + struct fib6_info *rt = NULL; struct net *net; int lifetime; struct ndisc_options ndopts; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0d861bd07673..2ccf939e1a20 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -96,24 +96,24 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu); static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); -static int rt6_score_route(struct rt6_info *rt, int oif, int strict); -static size_t rt6_nlmsg_size(struct rt6_info *rt); +static int rt6_score_route(struct fib6_info *rt, int oif, int strict); +static size_t rt6_nlmsg_size(struct fib6_info *rt); static int rt6_fill_node(struct net *net, struct sk_buff *skb, - struct rt6_info *rt, struct dst_entry *dst, + struct fib6_info *rt, struct dst_entry *dst, struct in6_addr *dest, struct in6_addr *src, int iif, int type, u32 portid, u32 seq, unsigned int flags); -static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt, +static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt, struct in6_addr *daddr, struct in6_addr *saddr); #ifdef CONFIG_IPV6_ROUTE_INFO -static struct rt6_info *rt6_add_route_info(struct net *net, +static struct fib6_info *rt6_add_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, struct net_device *dev, unsigned int pref); -static struct rt6_info *rt6_get_route_info(struct net *net, +static struct fib6_info *rt6_get_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, struct net_device *dev); @@ -283,7 +283,7 @@ static const u32 ip6_template_metrics[RTAX_MAX] = { [RTAX_HOPLIMIT - 1] = 0, }; -static const struct rt6_info fib6_null_entry_template = { +static const struct fib6_info fib6_null_entry_template = { .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32)0, @@ -372,7 +372,7 @@ EXPORT_SYMBOL(ip6_dst_alloc); static void ip6_dst_destroy(struct dst_entry *dst) { struct rt6_info *rt = (struct rt6_info *)dst; - struct rt6_info *from = rt->from; + struct fib6_info *from = rt->from; struct inet6_dev *idev; dst_destroy_metrics_generic(dst); @@ -425,13 +425,13 @@ static bool rt6_check_expired(const struct rt6_info *rt) return false; } -static struct rt6_info *rt6_multipath_select(const struct net *net, - struct rt6_info *match, +static struct fib6_info *rt6_multipath_select(const struct net *net, + struct fib6_info *match, struct flowi6 *fl6, int oif, const struct sk_buff *skb, int strict) { - struct rt6_info *sibling, *next_sibling; + struct fib6_info *sibling, *next_sibling; /* We might have already computed the hash for ICMPv6 errors. In such * case it will always be non-zero. Otherwise now is the time to do it. @@ -462,14 +462,14 @@ static struct rt6_info *rt6_multipath_select(const struct net *net, * Route lookup. rcu_read_lock() should be held. 
*/ -static inline struct rt6_info *rt6_device_match(struct net *net, - struct rt6_info *rt, +static inline struct fib6_info *rt6_device_match(struct net *net, + struct fib6_info *rt, const struct in6_addr *saddr, int oif, int flags) { - struct rt6_info *local = NULL; - struct rt6_info *sprt; + struct fib6_info *local = NULL; + struct fib6_info *sprt; if (!oif && ipv6_addr_any(saddr) && !(rt->fib6_nh.nh_flags & RTNH_F_DEAD)) @@ -532,7 +532,7 @@ static void rt6_probe_deferred(struct work_struct *w) kfree(work); } -static void rt6_probe(struct rt6_info *rt) +static void rt6_probe(struct fib6_info *rt) { struct __rt6_probe_work *work; const struct in6_addr *nh_gw; @@ -585,7 +585,7 @@ out: rcu_read_unlock_bh(); } #else -static inline void rt6_probe(struct rt6_info *rt) +static inline void rt6_probe(struct fib6_info *rt) { } #endif @@ -593,7 +593,7 @@ static inline void rt6_probe(struct rt6_info *rt) /* * Default Router Selection (RFC 2461 6.3.6) */ -static inline int rt6_check_dev(struct rt6_info *rt, int oif) +static inline int rt6_check_dev(struct fib6_info *rt, int oif) { const struct net_device *dev = rt->fib6_nh.nh_dev; @@ -605,7 +605,7 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif) return 0; } -static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt) +static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt) { enum rt6_nud_state ret = RT6_NUD_FAIL_HARD; struct neighbour *neigh; @@ -637,8 +637,7 @@ static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt) return ret; } -static int rt6_score_route(struct rt6_info *rt, int oif, - int strict) +static int rt6_score_route(struct fib6_info *rt, int oif, int strict) { int m; @@ -656,8 +655,8 @@ static int rt6_score_route(struct rt6_info *rt, int oif, return m; } -static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict, - int *mpri, struct rt6_info *match, +static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict, + int *mpri, struct fib6_info *match, bool *do_rr) { int m; @@ -696,13 +695,13 @@ out: return match; } -static struct rt6_info *find_rr_leaf(struct fib6_node *fn, - struct rt6_info *leaf, - struct rt6_info *rr_head, +static struct fib6_info *find_rr_leaf(struct fib6_node *fn, + struct fib6_info *leaf, + struct fib6_info *rr_head, u32 metric, int oif, int strict, bool *do_rr) { - struct rt6_info *rt, *match, *cont; + struct fib6_info *rt, *match, *cont; int mpri = -1; match = NULL; @@ -735,11 +734,11 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn, return match; } -static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn, +static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn, int oif, int strict) { - struct rt6_info *leaf = rcu_dereference(fn->leaf); - struct rt6_info *match, *rt0; + struct fib6_info *leaf = rcu_dereference(fn->leaf); + struct fib6_info *match, *rt0; bool do_rr = false; int key_plen; @@ -767,7 +766,7 @@ static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn, &do_rr); if (do_rr) { - struct rt6_info *next = rcu_dereference(rt0->rt6_next); + struct fib6_info *next = rcu_dereference(rt0->rt6_next); /* no entries matched; do round-robin */ if (!next || next->rt6i_metric != rt0->rt6i_metric) @@ -785,7 +784,7 @@ static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn, return match ? 
match : net->ipv6.fib6_null_entry; } -static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt) +static bool rt6_is_gw_or_nonexthop(const struct fib6_info *rt) { return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)); } @@ -799,7 +798,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, struct in6_addr prefix_buf, *prefix; unsigned int pref; unsigned long lifetime; - struct rt6_info *rt; + struct fib6_info *rt; if (len < sizeof(struct route_info)) { return -EINVAL; @@ -871,7 +870,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, */ /* called with rcu_lock held */ -static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt) +static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt) { struct net_device *dev = rt->fib6_nh.nh_dev; @@ -913,7 +912,7 @@ static int ip6_rt_type_to_error(u8 fib6_type) return fib6_prop[fib6_type]; } -static unsigned short fib6_info_dst_flags(struct rt6_info *rt) +static unsigned short fib6_info_dst_flags(struct fib6_info *rt) { unsigned short flags = 0; @@ -927,7 +926,7 @@ static unsigned short fib6_info_dst_flags(struct rt6_info *rt) return flags; } -static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct rt6_info *ort) +static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort) { rt->dst.error = ip6_rt_type_to_error(ort->fib6_type); @@ -949,7 +948,7 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct rt6_info *ort) } } -static void ip6_rt_init_dst(struct rt6_info *rt, struct rt6_info *ort) +static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) { rt->dst.flags |= fib6_info_dst_flags(ort); @@ -977,7 +976,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct rt6_info *ort) rt->dst.lastuse = jiffies; } -static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) +static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) { rt->rt6i_flags &= ~RTF_EXPIRES; fib6_info_hold(from); @@ -989,7 +988,7 @@ static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) } } -static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort) +static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) { ip6_rt_init_dst(rt, ort); @@ -1045,7 +1044,7 @@ static bool ip6_hold_safe(struct net *net, struct rt6_info **prt, } /* called with rcu_lock held */ -static struct rt6_info *ip6_create_rt_rcu(struct rt6_info *rt) +static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt) { unsigned short flags = fib6_info_dst_flags(rt); struct net_device *dev = rt->fib6_nh.nh_dev; @@ -1064,7 +1063,7 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, const struct sk_buff *skb, int flags) { - struct rt6_info *f6i; + struct fib6_info *f6i; struct fib6_node *fn; struct rt6_info *rt; @@ -1152,7 +1151,7 @@ EXPORT_SYMBOL(rt6_lookup); * Caller must hold dst before calling it. 
*/ -static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info, +static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info, struct netlink_ext_ack *extack) { int err; @@ -1166,14 +1165,14 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info, return err; } -int ip6_ins_rt(struct net *net, struct rt6_info *rt) +int ip6_ins_rt(struct net *net, struct fib6_info *rt) { struct nl_info info = { .nl_net = net, }; return __ip6_ins_rt(rt, &info, NULL); } -static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, +static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort, const struct in6_addr *daddr, const struct in6_addr *saddr) { @@ -1213,7 +1212,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, return rt; } -static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt) +static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt) { unsigned short flags = fib6_info_dst_flags(rt); struct net_device *dev; @@ -1232,7 +1231,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt) } /* It should be called with rcu_read_lock() acquired */ -static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) +static struct rt6_info *rt6_get_pcpu_route(struct fib6_info *rt) { struct rt6_info *pcpu_rt, **p; @@ -1246,7 +1245,7 @@ static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) } static struct rt6_info *rt6_make_pcpu_route(struct net *net, - struct rt6_info *rt) + struct fib6_info *rt) { struct rt6_info *pcpu_rt, *prev, **p; @@ -1390,7 +1389,7 @@ __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket, return NULL; } -static unsigned int fib6_mtu(const struct rt6_info *rt) +static unsigned int fib6_mtu(const struct fib6_info *rt) { unsigned int mtu; @@ -1401,7 +1400,7 @@ static unsigned int fib6_mtu(const struct rt6_info *rt) } static int rt6_insert_exception(struct rt6_info *nrt, - struct rt6_info *ort) + struct fib6_info *ort) { struct net *net = dev_net(nrt->dst.dev); struct rt6_exception_bucket *bucket; @@ -1487,7 +1486,7 @@ out: return err; } -void rt6_flush_exceptions(struct rt6_info *rt) +void rt6_flush_exceptions(struct fib6_info *rt) { struct rt6_exception_bucket *bucket; struct rt6_exception *rt6_ex; @@ -1517,7 +1516,7 @@ out: /* Find cached rt in the hash table inside passed in rt * Caller has to hold rcu_read_lock() */ -static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt, +static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt, struct in6_addr *daddr, struct in6_addr *saddr) { @@ -1550,7 +1549,7 @@ static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt, static int rt6_remove_exception_rt(struct rt6_info *rt) { struct rt6_exception_bucket *bucket; - struct rt6_info *from = rt->from; + struct fib6_info *from = rt->from; struct in6_addr *src_key = NULL; struct rt6_exception *rt6_ex; int err; @@ -1595,7 +1594,7 @@ static int rt6_remove_exception_rt(struct rt6_info *rt) static void rt6_update_exception_stamp_rt(struct rt6_info *rt) { struct rt6_exception_bucket *bucket; - struct rt6_info *from = rt->from; + struct fib6_info *from = rt->from; struct in6_addr *src_key = NULL; struct rt6_exception *rt6_ex; @@ -1625,7 +1624,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt) rcu_read_unlock(); } -static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt) +static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt) { struct rt6_exception_bucket *bucket; struct rt6_exception *rt6_ex; @@ -1667,7 +1666,7 @@ static bool rt6_mtu_change_route_allowed(struct 
inet6_dev *idev, } static void rt6_exceptions_update_pmtu(struct inet6_dev *idev, - struct rt6_info *rt, int mtu) + struct fib6_info *rt, int mtu) { struct rt6_exception_bucket *bucket; struct rt6_exception *rt6_ex; @@ -1697,7 +1696,7 @@ static void rt6_exceptions_update_pmtu(struct inet6_dev *idev, #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE) -static void rt6_exceptions_clean_tohost(struct rt6_info *rt, +static void rt6_exceptions_clean_tohost(struct fib6_info *rt, struct in6_addr *gateway) { struct rt6_exception_bucket *bucket; @@ -1776,7 +1775,7 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket, gc_args->more++; } -void rt6_age_exceptions(struct rt6_info *rt, +void rt6_age_exceptions(struct fib6_info *rt, struct fib6_gc_args *gc_args, unsigned long now) { @@ -1812,7 +1811,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, const struct sk_buff *skb, int flags) { struct fib6_node *fn, *saved_fn; - struct rt6_info *f6i; + struct fib6_info *f6i; struct rt6_info *rt; int strict = 0; @@ -2139,7 +2138,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori * Destination cache support functions */ -static bool fib6_check(struct rt6_info *f6i, u32 cookie) +static bool fib6_check(struct fib6_info *f6i, u32 cookie) { u32 rt_cookie = 0; @@ -2374,7 +2373,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net, { struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; struct rt6_info *ret = NULL, *rt_cache; - struct rt6_info *rt; + struct fib6_info *rt; struct fib6_node *fn; /* Get the "current" route for this destination and @@ -2620,7 +2619,7 @@ out: return entries > rt_max_size; } -static int ip6_convert_metrics(struct net *net, struct rt6_info *rt, +static int ip6_convert_metrics(struct net *net, struct fib6_info *rt, struct fib6_config *cfg) { int err = 0; @@ -2823,12 +2822,12 @@ out: return err; } -static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, +static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) { struct net *net = cfg->fc_nlinfo.nl_net; - struct rt6_info *rt = NULL; + struct fib6_info *rt = NULL; struct net_device *dev = NULL; struct inet6_dev *idev = NULL; struct fib6_table *table; @@ -3047,7 +3046,7 @@ out: int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) { - struct rt6_info *rt; + struct fib6_info *rt; int err; rt = ip6_route_info_create(cfg, gfp_flags, extack); @@ -3060,7 +3059,7 @@ int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, return err; } -static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) +static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info) { struct net *net = info->nl_net; struct fib6_table *table; @@ -3081,14 +3080,14 @@ out: return err; } -int ip6_del_rt(struct net *net, struct rt6_info *rt) +int ip6_del_rt(struct net *net, struct fib6_info *rt) { struct nl_info info = { .nl_net = net }; return __ip6_del_rt(rt, &info); } -static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) +static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg) { struct nl_info *info = &cfg->fc_nlinfo; struct net *net = info->nl_net; @@ -3102,7 +3101,7 @@ static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) spin_lock_bh(&table->tb6_lock); if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) { - struct rt6_info *sibling, *next_sibling; + struct fib6_info *sibling, 
*next_sibling; /* prefer to send a single notification with all hops */ skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); @@ -3159,8 +3158,9 @@ out: static int ip6_route_del(struct fib6_config *cfg, struct netlink_ext_ack *extack) { - struct rt6_info *rt, *rt_cache; + struct rt6_info *rt_cache; struct fib6_table *table; + struct fib6_info *rt; struct fib6_node *fn; int err = -ESRCH; @@ -3336,7 +3336,7 @@ out: } #ifdef CONFIG_IPV6_ROUTE_INFO -static struct rt6_info *rt6_get_route_info(struct net *net, +static struct fib6_info *rt6_get_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, struct net_device *dev) @@ -3344,7 +3344,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net, u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO; int ifindex = dev->ifindex; struct fib6_node *fn; - struct rt6_info *rt = NULL; + struct fib6_info *rt = NULL; struct fib6_table *table; table = fib6_get_table(net, tb_id); @@ -3363,7 +3363,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net, continue; if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr)) continue; - ip6_hold_safe(NULL, &rt, false); + fib6_info_hold(rt); break; } out: @@ -3371,7 +3371,7 @@ out: return rt; } -static struct rt6_info *rt6_add_route_info(struct net *net, +static struct fib6_info *rt6_add_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, struct net_device *dev, @@ -3404,12 +3404,12 @@ static struct rt6_info *rt6_add_route_info(struct net *net, } #endif -struct rt6_info *rt6_get_dflt_router(struct net *net, +struct fib6_info *rt6_get_dflt_router(struct net *net, const struct in6_addr *addr, struct net_device *dev) { u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT; - struct rt6_info *rt; + struct fib6_info *rt; struct fib6_table *table; table = fib6_get_table(net, tb_id); @@ -3424,12 +3424,12 @@ struct rt6_info *rt6_get_dflt_router(struct net *net, break; } if (rt) - ip6_hold_safe(NULL, &rt, false); + fib6_info_hold(rt); rcu_read_unlock(); return rt; } -struct rt6_info *rt6_add_dflt_router(struct net *net, +struct fib6_info *rt6_add_dflt_router(struct net *net, const struct in6_addr *gwaddr, struct net_device *dev, unsigned int pref) @@ -3463,7 +3463,7 @@ struct rt6_info *rt6_add_dflt_router(struct net *net, static void __rt6_purge_dflt_routers(struct net *net, struct fib6_table *table) { - struct rt6_info *rt; + struct fib6_info *rt; restart: rcu_read_lock(); @@ -3614,14 +3614,14 @@ static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff * Allocate a dst for local (unicast / anycast) address. */ -struct rt6_info *addrconf_dst_alloc(struct net *net, +struct fib6_info *addrconf_dst_alloc(struct net *net, struct inet6_dev *idev, const struct in6_addr *addr, bool anycast, gfp_t gfp_flags) { u32 tb_id; struct net_device *dev = idev->dev; - struct rt6_info *rt; + struct fib6_info *rt; rt = fib6_info_alloc(gfp_flags); if (!rt) @@ -3661,7 +3661,7 @@ struct arg_dev_net_ip { struct in6_addr *addr; }; -static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg) +static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg) { struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev; struct net *net = ((struct arg_dev_net_ip *)arg)->net; @@ -3694,7 +3694,7 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp) #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY) /* Remove routers and update dst entries when gateway turn into host. 
*/ -static int fib6_clean_tohost(struct rt6_info *rt, void *arg) +static int fib6_clean_tohost(struct fib6_info *rt, void *arg) { struct in6_addr *gateway = (struct in6_addr *)arg; @@ -3725,9 +3725,9 @@ struct arg_netdev_event { }; }; -static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt) +static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt) { - struct rt6_info *iter; + struct fib6_info *iter; struct fib6_node *fn; fn = rcu_dereference_protected(rt->rt6i_node, @@ -3745,7 +3745,7 @@ static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt) return NULL; } -static bool rt6_is_dead(const struct rt6_info *rt) +static bool rt6_is_dead(const struct fib6_info *rt) { if (rt->fib6_nh.nh_flags & RTNH_F_DEAD || (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN && @@ -3755,9 +3755,9 @@ static bool rt6_is_dead(const struct rt6_info *rt) return false; } -static int rt6_multipath_total_weight(const struct rt6_info *rt) +static int rt6_multipath_total_weight(const struct fib6_info *rt) { - struct rt6_info *iter; + struct fib6_info *iter; int total = 0; if (!rt6_is_dead(rt)) @@ -3771,7 +3771,7 @@ static int rt6_multipath_total_weight(const struct rt6_info *rt) return total; } -static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total) +static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total) { int upper_bound = -1; @@ -3783,9 +3783,9 @@ static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total) atomic_set(&rt->fib6_nh.nh_upper_bound, upper_bound); } -static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total) +static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total) { - struct rt6_info *iter; + struct fib6_info *iter; int weight = 0; rt6_upper_bound_set(rt, &weight, total); @@ -3794,9 +3794,9 @@ static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total) rt6_upper_bound_set(iter, &weight, total); } -void rt6_multipath_rebalance(struct rt6_info *rt) +void rt6_multipath_rebalance(struct fib6_info *rt) { - struct rt6_info *first; + struct fib6_info *first; int total; /* In case the entire multipath route was marked for flushing, @@ -3818,7 +3818,7 @@ void rt6_multipath_rebalance(struct rt6_info *rt) rt6_multipath_upper_bound_set(first, total); } -static int fib6_ifup(struct rt6_info *rt, void *p_arg) +static int fib6_ifup(struct fib6_info *rt, void *p_arg) { const struct arg_netdev_event *arg = p_arg; struct net *net = dev_net(arg->dev); @@ -3847,10 +3847,10 @@ void rt6_sync_up(struct net_device *dev, unsigned int nh_flags) fib6_clean_all(dev_net(dev), fib6_ifup, &arg); } -static bool rt6_multipath_uses_dev(const struct rt6_info *rt, +static bool rt6_multipath_uses_dev(const struct fib6_info *rt, const struct net_device *dev) { - struct rt6_info *iter; + struct fib6_info *iter; if (rt->fib6_nh.nh_dev == dev) return true; @@ -3861,19 +3861,19 @@ static bool rt6_multipath_uses_dev(const struct rt6_info *rt, return false; } -static void rt6_multipath_flush(struct rt6_info *rt) +static void rt6_multipath_flush(struct fib6_info *rt) { - struct rt6_info *iter; + struct fib6_info *iter; rt->should_flush = 1; list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) iter->should_flush = 1; } -static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt, +static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt, const struct net_device *down_dev) { - struct rt6_info *iter; + struct fib6_info *iter; unsigned int dead = 0; if 
(rt->fib6_nh.nh_dev == down_dev || @@ -3887,11 +3887,11 @@ static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt, return dead; } -static void rt6_multipath_nh_flags_set(struct rt6_info *rt, +static void rt6_multipath_nh_flags_set(struct fib6_info *rt, const struct net_device *dev, unsigned int nh_flags) { - struct rt6_info *iter; + struct fib6_info *iter; if (rt->fib6_nh.nh_dev == dev) rt->fib6_nh.nh_flags |= nh_flags; @@ -3901,7 +3901,7 @@ static void rt6_multipath_nh_flags_set(struct rt6_info *rt, } /* called with write lock held for table with rt */ -static int fib6_ifdown(struct rt6_info *rt, void *p_arg) +static int fib6_ifdown(struct fib6_info *rt, void *p_arg) { const struct arg_netdev_event *arg = p_arg; const struct net_device *dev = arg->dev; @@ -3968,7 +3968,7 @@ struct rt6_mtu_change_arg { unsigned int mtu; }; -static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) +static int rt6_mtu_change_route(struct fib6_info *rt, void *p_arg) { struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; struct inet6_dev *idev; @@ -4155,7 +4155,7 @@ errout: } struct rt6_nh { - struct rt6_info *rt6_info; + struct fib6_info *fib6_info; struct fib6_config r_cfg; struct list_head next; }; @@ -4173,21 +4173,22 @@ static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) static int ip6_route_info_append(struct net *net, struct list_head *rt6_nh_list, - struct rt6_info *rt, struct fib6_config *r_cfg) + struct fib6_info *rt, + struct fib6_config *r_cfg) { struct rt6_nh *nh; int err = -EEXIST; list_for_each_entry(nh, rt6_nh_list, next) { - /* check if rt6_info already exists */ - if (rt6_duplicate_nexthop(nh->rt6_info, rt)) + /* check if fib6_info already exists */ + if (rt6_duplicate_nexthop(nh->fib6_info, rt)) return err; } nh = kzalloc(sizeof(*nh), GFP_KERNEL); if (!nh) return -ENOMEM; - nh->rt6_info = rt; + nh->fib6_info = rt; err = ip6_convert_metrics(net, rt, r_cfg); if (err) { kfree(nh); @@ -4199,8 +4200,8 @@ static int ip6_route_info_append(struct net *net, return 0; } -static void ip6_route_mpath_notify(struct rt6_info *rt, - struct rt6_info *rt_last, +static void ip6_route_mpath_notify(struct fib6_info *rt, + struct fib6_info *rt_last, struct nl_info *info, __u16 nlflags) { @@ -4212,7 +4213,7 @@ static void ip6_route_mpath_notify(struct rt6_info *rt, */ if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) { rt = list_first_entry(&rt_last->rt6i_siblings, - struct rt6_info, + struct fib6_info, rt6i_siblings); } @@ -4223,11 +4224,11 @@ static void ip6_route_mpath_notify(struct rt6_info *rt, static int ip6_route_multipath_add(struct fib6_config *cfg, struct netlink_ext_ack *extack) { - struct rt6_info *rt_notif = NULL, *rt_last = NULL; + struct fib6_info *rt_notif = NULL, *rt_last = NULL; struct nl_info *info = &cfg->fc_nlinfo; struct fib6_config r_cfg; struct rtnexthop *rtnh; - struct rt6_info *rt; + struct fib6_info *rt; struct rt6_nh *err_nh; struct rt6_nh *nh, *nh_safe; __u16 nlflags; @@ -4247,7 +4248,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, rtnh = (struct rtnexthop *)cfg->fc_mp; /* Parse a Multipath Entry and build a list (rt6_nh_list) of - * rt6_info structs per nexthop + * fib6_info structs per nexthop */ while (rtnh_ok(rtnh, remaining)) { memcpy(&r_cfg, cfg, sizeof(*cfg)); @@ -4297,16 +4298,16 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, err_nh = NULL; list_for_each_entry(nh, &rt6_nh_list, next) { - rt_last = nh->rt6_info; - err = __ip6_ins_rt(nh->rt6_info, info, extack); - 
fib6_info_release(nh->rt6_info); + rt_last = nh->fib6_info; + err = __ip6_ins_rt(nh->fib6_info, info, extack); + fib6_info_release(nh->fib6_info); /* save reference to first route for notification */ if (!rt_notif && !err) - rt_notif = nh->rt6_info; + rt_notif = nh->fib6_info; - /* nh->rt6_info is used or freed at this point, reset to NULL*/ - nh->rt6_info = NULL; + /* nh->fib6_info is used or freed at this point, reset to NULL*/ + nh->fib6_info = NULL; if (err) { if (replace && nhn) ip6_print_replace_route_err(&rt6_nh_list); @@ -4347,8 +4348,8 @@ add_errout: cleanup: list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { - if (nh->rt6_info) - fib6_info_release(nh->rt6_info); + if (nh->fib6_info) + fib6_info_release(nh->fib6_info); list_del(&nh->next); kfree(nh); } @@ -4428,7 +4429,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, return ip6_route_add(&cfg, GFP_KERNEL, extack); } -static size_t rt6_nlmsg_size(struct rt6_info *rt) +static size_t rt6_nlmsg_size(struct fib6_info *rt) { int nexthop_len = 0; @@ -4458,7 +4459,7 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt) + nexthop_len; } -static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, +static int rt6_nexthop_info(struct sk_buff *skb, struct fib6_info *rt, unsigned int *flags, bool skip_oif) { if (rt->fib6_nh.nh_flags & RTNH_F_DEAD) @@ -4495,7 +4496,7 @@ nla_put_failure: } /* add multipath next hop */ -static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) +static int rt6_add_nexthop(struct sk_buff *skb, struct fib6_info *rt) { const struct net_device *dev = rt->fib6_nh.nh_dev; struct rtnexthop *rtnh; @@ -4523,7 +4524,7 @@ nla_put_failure: } static int rt6_fill_node(struct net *net, struct sk_buff *skb, - struct rt6_info *rt, struct dst_entry *dst, + struct fib6_info *rt, struct dst_entry *dst, struct in6_addr *dest, struct in6_addr *src, int iif, int type, u32 portid, u32 seq, unsigned int flags) @@ -4613,7 +4614,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, * each as a nexthop within RTA_MULTIPATH. */ if (rt->rt6i_nsiblings) { - struct rt6_info *sibling, *next_sibling; + struct fib6_info *sibling, *next_sibling; struct nlattr *mp; mp = nla_nest_start(skb, RTA_MULTIPATH); @@ -4655,7 +4656,7 @@ nla_put_failure: return -EMSGSIZE; } -int rt6_dump_route(struct rt6_info *rt, void *p_arg) +int rt6_dump_route(struct fib6_info *rt, void *p_arg) { struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; struct net *net = arg->net; @@ -4800,7 +4801,7 @@ errout: return err; } -void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, +void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, unsigned int nlm_flags) { struct sk_buff *skb; -- cgit v1.2.3 From e5e0a59b7cd17244cb0c1d87112b0c9e5e2bfb39 Mon Sep 17 00:00:00 2001 From: "Nikita V. Shirokov" Date: Tue, 17 Apr 2018 21:42:15 -0700 Subject: bpf: make mlx4 compatible w/ bpf_xdp_adjust_tail w/ bpf_xdp_adjust_tail helper xdp's data_end pointer could be changed as well (only "decrease" of pointer's location is going to be supported). changing of this pointer will change packet's size. for mlx4 driver we will just calculate packet's length unconditionally (the same way as it's already being done in mlx5) Acked-by: Alexei Starovoitov Acked-by: Tariq Toukan Signed-off-by: Nikita V. 
Shirokov Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 5c613c6663da..efc55feddc5c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -775,8 +775,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud act = bpf_prog_run_xdp(xdp_prog, &xdp); + length = xdp.data_end - xdp.data; if (xdp.data != orig_data) { - length = xdp.data_end - xdp.data; frags[0].page_offset = xdp.data - xdp.data_hard_start; va = xdp.data; -- cgit v1.2.3 From b968e735c79767a3c91217fbae691581aa557d8d Mon Sep 17 00:00:00 2001 From: "Nikita V. Shirokov" Date: Tue, 17 Apr 2018 21:42:16 -0700 Subject: bpf: make bnxt compatible w/ bpf_xdp_adjust_tail w/ bpf_xdp_adjust_tail helper xdp's data_end pointer could be changed as well (only "decrease" of pointer's location is going to be supported). changing of this pointer will change packet's size. for bnxt driver we will just calculate packet's length unconditionally Acked-by: Alexei Starovoitov Signed-off-by: Nikita V. Shirokov Acked-by: Michael Chan Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 1389ab5e05df..1f0e872d0667 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -113,10 +113,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, if (tx_avail != bp->tx_ring_size) *event &= ~BNXT_RX_EVENT; + *len = xdp.data_end - xdp.data; if (orig_data != xdp.data) { offset = xdp.data - xdp.data_hard_start; *data_ptr = xdp.data_hard_start + offset; - *len = xdp.data_end - xdp.data; } switch (act) { case XDP_PASS: -- cgit v1.2.3 From a48ce00f9aeb6ce1227e291c36a0fa4995273144 Mon Sep 17 00:00:00 2001 From: "Nikita V. Shirokov" Date: Tue, 17 Apr 2018 21:42:17 -0700 Subject: bpf: make cavium thunder compatible w/ bpf_xdp_adjust_tail w/ bpf_xdp_adjust_tail helper xdp's data_end pointer could be changed as well (only "decrease" of pointer's location is going to be supported). changing of this pointer will change packet's size. for cavium's thunder driver we will just calculate packet's length unconditionally Acked-by: Alexei Starovoitov Signed-off-by: Nikita V. Shirokov Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 707db3304396..7135db45927e 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -538,9 +538,9 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, action = bpf_prog_run_xdp(prog, &xdp); rcu_read_unlock(); + len = xdp.data_end - xdp.data; /* Check if XDP program has changed headers */ if (orig_data != xdp.data) { - len = xdp.data_end - xdp.data; offset = orig_data - xdp.data; dma_addr -= offset; } -- cgit v1.2.3 From 5a6a22e378244cb3aa0473abc31a5d1c2cb2327b Mon Sep 17 00:00:00 2001 From: "Nikita V. 
Shirokov" Date: Tue, 17 Apr 2018 21:42:18 -0700 Subject: bpf: make netronome nfp compatible w/ bpf_xdp_adjust_tail w/ bpf_xdp_adjust_tail helper xdp's data_end pointer could be changed as well (only "decrease" of pointer's location is going to be supported). changing of this pointer will change packet's size. for nfp driver we will just calculate packet's length unconditionally Acked-by: Alexei Starovoitov Acked-by: Jakub Kicinski Signed-off-by: Nikita V. Shirokov Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 1eb6549f2a54..d9111c077699 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1722,7 +1722,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) act = bpf_prog_run_xdp(xdp_prog, &xdp); - pkt_len -= xdp.data - orig_data; + pkt_len = xdp.data_end - xdp.data; pkt_off += xdp.data - orig_data; switch (act) { -- cgit v1.2.3 From 8fb58f1ecf5cbcc7805dd339243b7d9837cbabbe Mon Sep 17 00:00:00 2001 From: "Nikita V. Shirokov" Date: Tue, 17 Apr 2018 21:42:19 -0700 Subject: bpf: make tun compatible w/ bpf_xdp_adjust_tail w/ bpf_xdp_adjust_tail helper xdp's data_end pointer could be changed as well (only "decrease" of pointer's location is going to be supported). changing of this pointer will change packet's size. for tun driver we need to adjust XDP_PASS handling by recalculating length of the packet if it was passed to the TCP/IP stack (in case if after xdp's prog run data_end pointer was adjusted) Reviewed-by: Jason Wang Signed-off-by: Nikita V. Shirokov Acked-by: Michael S. Tsirkin Signed-off-by: Daniel Borkmann --- drivers/net/tun.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 1e58be152d5c..901351a6ed21 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1696,6 +1696,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, return NULL; case XDP_PASS: delta = orig_data - xdp.data; + len = xdp.data_end - xdp.data; break; default: bpf_warn_invalid_xdp_action(act); @@ -1716,7 +1717,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, } skb_reserve(skb, pad - delta); - skb_put(skb, len + delta); + skb_put(skb, len); get_page(alloc_frag->page); alloc_frag->offset += buflen; -- cgit v1.2.3 From 6870de435b90c083ae0f3f7f341287976ef56f03 Mon Sep 17 00:00:00 2001 From: "Nikita V. Shirokov" Date: Tue, 17 Apr 2018 21:42:20 -0700 Subject: bpf: make virtio compatible w/ bpf_xdp_adjust_tail w/ bpf_xdp_adjust_tail helper xdp's data_end pointer could be changed as well (only "decrease" of pointer's location is going to be supported). changing of this pointer will change packet's size. for virtio driver we need to adjust XDP_PASS handling by recalculating length of the packet if it was passed to the TCP/IP stack Reviewed-by: Jason Wang Signed-off-by: Nikita V. 
Shirokov Signed-off-by: Daniel Borkmann --- drivers/net/virtio_net.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 01694e26f03e..779a4f798522 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -606,6 +606,7 @@ static struct sk_buff *receive_small(struct net_device *dev, case XDP_PASS: /* Recalculate length in case bpf program changed it */ delta = orig_data - xdp.data; + len = xdp.data_end - xdp.data; break; case XDP_TX: xdpf = convert_to_xdp_frame(&xdp); @@ -642,7 +643,7 @@ static struct sk_buff *receive_small(struct net_device *dev, goto err; } skb_reserve(skb, headroom - delta); - skb_put(skb, len + delta); + skb_put(skb, len); if (!delta) { buf += header_offset; memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); @@ -757,6 +758,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, offset = xdp.data - page_address(xdp_page) - vi->hdr_len; + /* recalculate len if xdp.data or xdp.data_end were + * adjusted */ + len = xdp.data_end - xdp.data; /* We can only create skb based on xdp_page. */ if (unlikely(xdp_page != page)) { rcu_read_unlock(); -- cgit v1.2.3 From 2953586df76e76ec2e62ed05897c1d242c06874d Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 17 Apr 2018 17:30:32 -0400 Subject: net: netcp: ethss: use macro for checking ss_version consistently The driver currently uses a macro for NU and XGBE hardware, while in other places, for older hardware such as that on the K2H/K SoC (version 1.4 of the cpsw hardware), it explicitly checks the ss_version inline. Add a new macro for version 1.4 and use it to customize code in the driver. While at it, also fix a similar issue with checking the XGBE version by re-using the existing macro IS_SS_ID_XGBE(). Signed-off-by: Murali Karicheri Signed-off-by: David S.
Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 56dbc0b9fedc..1b79fe51c747 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -42,7 +42,7 @@ /* 1G Ethernet SS defines */ #define GBE_MODULE_NAME "netcp-gbe" -#define GBE_SS_VERSION_14 0x4ed21104 +#define GBE_SS_VERSION_14 0x4ed2 #define GBE_SS_REG_INDEX 0 #define GBE_SGMII34_REG_INDEX 1 @@ -72,6 +72,9 @@ #define IS_SS_ID_NU(d) \ (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) +#define IS_SS_ID_VER_14(d) \ + (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14) + #define GBENU_SS_REG_INDEX 0 #define GBENU_SM_REG_INDEX 1 #define GBENU_SGMII_MODULE_OFFSET 0x100 @@ -86,7 +89,7 @@ /* 10G Ethernet SS defines */ #define XGBE_MODULE_NAME "netcp-xgbe" -#define XGBE_SS_VERSION_10 0x4ee42100 +#define XGBE_SS_VERSION_10 0x4ee4 #define XGBE_SS_REG_INDEX 0 #define XGBE_SM_REG_INDEX 1 @@ -1915,7 +1918,7 @@ static void keystone_get_ethtool_stats(struct net_device *ndev, gbe_dev = gbe_intf->gbe_dev; spin_lock_bh(&gbe_dev->hw_stats_lock); - if (gbe_dev->ss_version == GBE_SS_VERSION_14) + if (IS_SS_ID_VER_14(gbe_dev)) gbe_update_stats_ver14(gbe_dev, data); else gbe_update_stats(gbe_dev, data); @@ -2205,7 +2208,7 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, max_rx_len = NETCP_MAX_FRAME_SIZE; /* Enable correct MII mode at SS level */ - if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) && + if (IS_SS_ID_XGBE(gbe_dev) && (slave->link_interface >= XGMII_LINK_MAC_PHY)) { xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control)); xgmii_mode |= (1 << slave->slave_num); @@ -2293,7 +2296,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) } if (has_phy) { - if (priv->ss_version == XGBE_SS_VERSION_10) + if (IS_SS_ID_XGBE(priv)) hndlr = xgbe_adjust_link; slave->phy = of_phy_connect(gbe_intf->ndev, @@ -2764,7 +2767,7 @@ static void netcp_ethss_timer(struct timer_list *t) /* A timer runs as a BH, no need to block them */ spin_lock(&gbe_dev->hw_stats_lock); - if (gbe_dev->ss_version == GBE_SS_VERSION_14) + if (IS_SS_ID_VER_14(gbe_dev)) gbe_update_stats_ver14(gbe_dev, NULL); else gbe_update_stats(gbe_dev, NULL); @@ -2807,7 +2810,7 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) GBE_RTL_VERSION(reg), GBE_IDENT(reg)); /* For 10G and on NetCP 1.5, use directed to port */ - if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev)) + if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev)) gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO; if (gbe_dev->enable_ale) @@ -2924,7 +2927,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, /* Emac regs memmap are contiguous but port regs are not */ port_reg_num = slave->slave_num; - if (gbe_dev->ss_version == GBE_SS_VERSION_14) { + if (IS_SS_ID_VER_14(gbe_dev)) { if (slave->slave_num > 1) { port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET; port_reg_num -= 2; @@ -2939,7 +2942,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, emac_reg_ofs = GBENU_EMAC_OFFSET; port_reg_blk_sz = 0x1000; emac_reg_blk_sz = 0x1000; - } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { + } else if (IS_SS_ID_XGBE(gbe_dev)) { port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET; emac_reg_ofs = XGBE10_EMAC_OFFSET; port_reg_blk_sz = 0x30; @@ -2955,7 +2958,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct 
gbe_slave *slave, slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs + (emac_reg_blk_sz * slave->slave_num); - if (gbe_dev->ss_version == GBE_SS_VERSION_14) { + if (IS_SS_ID_VER_14(gbe_dev)) { /* Initialize slave port register offsets */ GBE_SET_REG_OFS(slave, port_regs, port_vlan); GBE_SET_REG_OFS(slave, port_regs, tx_pri_map); @@ -2989,7 +2992,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, GBENU_SET_REG_OFS(slave, emac_regs, mac_control); GBENU_SET_REG_OFS(slave, emac_regs, soft_reset); - } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { + } else if (IS_SS_ID_XGBE(gbe_dev)) { /* Initialize slave port register offsets */ XGBE_SET_REG_OFS(slave, port_regs, port_vlan); XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map); @@ -3508,7 +3511,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); - if (gbe_dev->ss_version == GBE_SS_VERSION_14) + if (IS_SS_ID_VER_14(gbe_dev)) ret = set_gbe_ethss14_priv(gbe_dev, node); else if (IS_SS_ID_MU(gbe_dev)) ret = set_gbenu_ethss_priv(gbe_dev, node); @@ -3606,7 +3609,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, spin_lock_bh(&gbe_dev->hw_stats_lock); for (i = 0; i < gbe_dev->num_stats_mods; i++) { - if (gbe_dev->ss_version == GBE_SS_VERSION_14) + if (IS_SS_ID_VER_14(gbe_dev)) gbe_reset_mod_stats_ver14(gbe_dev, i); else gbe_reset_mod_stats(gbe_dev, i); -- cgit v1.2.3 From 775f9535dc5e53ef57d0ebd17e1d7a94c599ecce Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 17 Apr 2018 17:30:33 -0400 Subject: net: netcp: ethss: make sgmii configuration conditional As a preparatory patch to add support for 2u cpsw hardware found on K2G SoC, make sgmii configuration conditional. This is required since 2u uses RGMII interface instead of SGMII and to allow for driver re-use. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 1b79fe51c747..e1e67ff9eb86 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -74,6 +74,8 @@ #define IS_SS_ID_VER_14(d) \ (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14) +#define IS_SS_ID_2U(d) \ + (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U) #define GBENU_SS_REG_INDEX 0 #define GBENU_SM_REG_INDEX 1 @@ -2239,7 +2241,8 @@ static void gbe_slave_stop(struct gbe_intf *intf) struct gbe_priv *gbe_dev = intf->gbe_dev; struct gbe_slave *slave = intf->slave; - gbe_sgmii_rtreset(gbe_dev, slave, true); + if (!IS_SS_ID_2U(gbe_dev)) + gbe_sgmii_rtreset(gbe_dev, slave, true); gbe_port_reset(slave); /* Disable forwarding */ cpsw_ale_control_set(gbe_dev->ale, slave->port_num, @@ -2274,9 +2277,11 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) void (*hndlr)(struct net_device *) = gbe_adjust_link; - gbe_sgmii_config(priv, slave); + if (!IS_SS_ID_2U(priv)) + gbe_sgmii_config(priv, slave); gbe_port_reset(slave); - gbe_sgmii_rtreset(priv, slave, false); + if (!IS_SS_ID_2U(priv)) + gbe_sgmii_rtreset(priv, slave, false); gbe_port_config(priv, slave, priv->rx_packet_max); gbe_set_slave_mac(slave, gbe_intf); /* enable forwarding */ @@ -3042,7 +3047,8 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, continue; } - gbe_sgmii_config(gbe_dev, slave); + if (!IS_SS_ID_2U(gbe_dev)) + gbe_sgmii_config(gbe_dev, slave); gbe_port_reset(slave); gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max); list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves); @@ -3399,7 +3405,9 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, } gbe_dev->switch_regs = regs; - gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; + if (!IS_SS_ID_2U(gbe_dev)) + gbe_dev->sgmii_port_regs = + gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; /* Although sgmii modules are mem mapped to one contiguous * region on GBENU devices, setting sgmii_port34_regs allows -- cgit v1.2.3 From 478e9a5fcf3fda621f3d660979c42d24f3435d73 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 17 Apr 2018 17:30:34 -0400 Subject: net: netcp: ethss: add support for handling rgmii link interface 2u cpsw hardware on K2G uses rgmii link to interface with Phy. So add support for this interface in the code so that driver can be re-used for 2u hardware. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp.h | 2 ++ drivers/net/ethernet/ti/netcp_ethss.c | 15 +++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 8900a6fad318..416f732a00f7 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -33,6 +33,8 @@ #define SGMII_LINK_MAC_MAC_FORCED 2 #define SGMII_LINK_MAC_FIBER 3 #define SGMII_LINK_MAC_PHY_NO_MDIO 4 +#define RGMII_LINK_MAC_PHY 5 +#define RGMII_LINK_MAC_PHY_NO_MDIO 7 #define XGMII_LINK_MAC_PHY 10 #define XGMII_LINK_MAC_MAC_FORCED 11 diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index e1e67ff9eb86..68b10b5f9a51 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2096,8 +2096,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, ALE_PORT_STATE_FORWARD); if (ndev && slave->open && - slave->link_interface != SGMII_LINK_MAC_PHY && - slave->link_interface != XGMII_LINK_MAC_PHY) + ((slave->link_interface != SGMII_LINK_MAC_PHY) && + (slave->link_interface != RGMII_LINK_MAC_PHY) && + (slave->link_interface != XGMII_LINK_MAC_PHY))) netif_carrier_on(ndev); } else { writel(mac_control, GBE_REG_ADDR(slave, emac_regs, @@ -2106,8 +2107,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); if (ndev && - slave->link_interface != SGMII_LINK_MAC_PHY && - slave->link_interface != XGMII_LINK_MAC_PHY) + ((slave->link_interface != SGMII_LINK_MAC_PHY) && + (slave->link_interface != RGMII_LINK_MAC_PHY) && + (slave->link_interface != XGMII_LINK_MAC_PHY))) netif_carrier_off(ndev); } @@ -2921,6 +2923,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, slave->open = false; if ((slave->link_interface == SGMII_LINK_MAC_PHY) || + (slave->link_interface == RGMII_LINK_MAC_PHY) || (slave->link_interface == XGMII_LINK_MAC_PHY)) slave->phy_node = of_parse_phandle(node, "phy-handle", 0); slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num); @@ -3082,6 +3085,9 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, if (slave->link_interface == SGMII_LINK_MAC_PHY) { phy_mode = PHY_INTERFACE_MODE_SGMII; slave->phy_port_t = PORT_MII; + } else if (slave->link_interface == RGMII_LINK_MAC_PHY) { + phy_mode = PHY_INTERFACE_MODE_RGMII; + slave->phy_port_t = PORT_MII; } else { phy_mode = PHY_INTERFACE_MODE_NA; slave->phy_port_t = PORT_FIBRE; @@ -3089,6 +3095,7 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, for_each_sec_slave(slave, gbe_dev) { if ((slave->link_interface != SGMII_LINK_MAC_PHY) && + (slave->link_interface != RGMII_LINK_MAC_PHY) && (slave->link_interface != XGMII_LINK_MAC_PHY)) continue; slave->phy = -- cgit v1.2.3 From 7771f2b4d0f774de752346d6340cd79c1d0b0477 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 17 Apr 2018 17:30:35 -0400 Subject: net: netcp: ethss: use rgmii link status for 2u cpsw hardware Introduce rgmii link status to handle link state events for 2u cpsw hardware on K2G. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 68b10b5f9a51..83acb728a8f7 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -171,6 +171,11 @@ #define GBE_RXHOOK_ORDER 0 #define GBE_DEFAULT_ALE_AGEOUT 30 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY) +#define SLAVE_LINK_IS_RGMII(s) \ + (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \ + ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO)) +#define SLAVE_LINK_IS_SGMII(s) \ + ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO) #define NETCP_LINK_STATE_INVALID -1 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ @@ -554,6 +559,7 @@ struct gbe_ss_regs { struct gbe_ss_regs_ofs { u16 id_ver; u16 control; + u16 rgmii_status; /* 2U */ }; struct gbe_switch_regs { @@ -2122,23 +2128,35 @@ static bool gbe_phy_link_status(struct gbe_slave *slave) return !slave->phy || slave->phy->link; } +#define RGMII_REG_STATUS_LINK BIT(0) + +static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status) +{ + u32 val = 0; + + val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status)); + *status = !!(val & RGMII_REG_STATUS_LINK); +} + static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, struct gbe_slave *slave, struct net_device *ndev) { - int sp = slave->slave_num; - int phy_link_state, sgmii_link_state = 1, link_state; + bool sw_link_state = true, phy_link_state; + int sp = slave->slave_num, link_state; if (!slave->open) return; - if (!SLAVE_LINK_IS_XGMII(slave)) { - sgmii_link_state = - netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp); - } + if (SLAVE_LINK_IS_RGMII(slave)) + netcp_2u_rgmii_get_port_link(gbe_dev, + &sw_link_state); + if (SLAVE_LINK_IS_SGMII(slave)) + sw_link_state = + netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp); phy_link_state = gbe_phy_link_status(slave); - link_state = phy_link_state & sgmii_link_state; + link_state = phy_link_state & sw_link_state; if (atomic_xchg(&slave->link_state, link_state) != link_state) netcp_ethss_link_state_action(gbe_dev, ndev, slave, @@ -3437,6 +3455,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, /* Subsystem registers */ GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver); + /* ok to set for MU, but used by 2U only */ + GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status); /* Switch module registers */ GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver); -- cgit v1.2.3 From 65c450642c4991ab456ebd157e14d52c81106695 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 17 Apr 2018 17:30:36 -0400 Subject: net: netcp: ethss: map vlan priorities to zero flow The driver currently support only vlan priority zero. So map the vlan priorities to zero flow in hardware. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 83acb728a8f7..91935e589b08 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -602,6 +602,7 @@ struct gbe_port_regs { struct gbe_port_regs_ofs { u16 port_vlan; u16 tx_pri_map; + u16 rx_pri_map; u16 sa_lo; u16 sa_hi; u16 ts_ctl; @@ -2304,6 +2305,13 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) gbe_sgmii_rtreset(priv, slave, false); gbe_port_config(priv, slave, priv->rx_packet_max); gbe_set_slave_mac(slave, gbe_intf); + /* For NU & 2U switch, map the vlan priorities to zero + * as we only configure to use priority 0 + */ + if (IS_SS_ID_MU(priv)) + writel(HOST_TX_PRI_MAP_DEFAULT, + GBE_REG_ADDR(slave, port_regs, rx_pri_map)); + /* enable forwarding */ cpsw_ale_control_set(priv->ale, slave->port_num, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); @@ -3005,6 +3013,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, /* Initialize slave port register offsets */ GBENU_SET_REG_OFS(slave, port_regs, port_vlan); GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map); + GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map); GBENU_SET_REG_OFS(slave, port_regs, sa_lo); GBENU_SET_REG_OFS(slave, port_regs, sa_hi); GBENU_SET_REG_OFS(slave, port_regs, ts_ctl); -- cgit v1.2.3 From dc07ec97368f064102a78bddbfba4574ed7cb73d Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 17 Apr 2018 17:30:37 -0400 Subject: net: netcp: ethss: re-use stats handling code for 2u hardware The stats block in 2u cpsw hardware is similar to the one on nu and hence handle it in a similar way by using a macro that includes 2u hardware as well. Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 91935e589b08..d982dcbe17b5 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3398,7 +3398,7 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->num_stats_mods = gbe_dev->max_num_ports; gbe_dev->et_stats = gbenu_et_stats; - if (IS_SS_ID_NU(gbe_dev)) + if (IS_SS_ID_MU(gbe_dev)) gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); else -- cgit v1.2.3 From 7b647b93b359d590094e0787fc86a6a9d57b25f7 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 17 Apr 2018 17:30:38 -0400 Subject: net: netcp: ethss: use of_get_phy_mode() to support different RGMII modes The phy used for K2G allows for internal delays to be added optionally to the clock circuitry based on board design. To add this support, enhance the driver to use of_get_phy_mode() to read the phy-mode from the phy device and pass the same to phy through of_phy_connect(). Signed-off-by: Murali Karicheri Signed-off-by: David S.
Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index d982dcbe17b5..5bc792175e4e 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -707,6 +708,7 @@ struct gbe_slave { u32 link_interface; u32 mac_control; u8 phy_port_t; + struct device_node *node; struct device_node *phy_node; struct ts_ctl ts_ctl; struct list_head slave_list; @@ -2322,6 +2324,21 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) has_phy = true; phy_mode = PHY_INTERFACE_MODE_SGMII; slave->phy_port_t = PORT_MII; + } else if (slave->link_interface == RGMII_LINK_MAC_PHY) { + has_phy = true; + phy_mode = of_get_phy_mode(slave->node); + /* if phy-mode is not present, default to + * PHY_INTERFACE_MODE_RGMII + */ + if (phy_mode < 0) + phy_mode = PHY_INTERFACE_MODE_RGMII; + + if (!phy_interface_mode_is_rgmii(phy_mode)) { + dev_err(priv->dev, + "Unsupported phy mode %d\n", phy_mode); + return -EINVAL; + } + slave->phy_port_t = PORT_MII; } else if (slave->link_interface == XGMII_LINK_MAC_PHY) { has_phy = true; phy_mode = PHY_INTERFACE_MODE_NA; @@ -2947,6 +2964,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, slave->link_interface = SGMII_LINK_MAC_PHY; } + slave->node = node; slave->open = false; if ((slave->link_interface == SGMII_LINK_MAC_PHY) || (slave->link_interface == RGMII_LINK_MAC_PHY) || -- cgit v1.2.3 From c52b6782d00e4de299647dc6448aa844a9f4941b Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 17 Apr 2018 17:30:39 -0400 Subject: Revert "net: netcp: remove dead code from the driver" As the probe sequence is not guaranteed contrary to the assumption of the commit 2d8e276a9030, same has to be reverted. commit 2d8e276a9030 ("net: netcp: remove dead code from the driver") Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/netcp_core.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index f5a7eb22d0f5..9c51b25d7fd0 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2155,6 +2155,7 @@ static int netcp_probe(struct platform_device *pdev) struct device_node *child, *interfaces; struct netcp_device *netcp_device; struct device *dev = &pdev->dev; + struct netcp_module *module; int ret; if (!node) { @@ -2203,6 +2204,14 @@ static int netcp_probe(struct platform_device *pdev) /* Add the device instance to the list */ list_add_tail(&netcp_device->device_list, &netcp_devices); + /* Probe & attach any modules already registered */ + mutex_lock(&netcp_modules_lock); + for_each_netcp_module(module) { + ret = netcp_module_probe(netcp_device, module); + if (ret < 0) + dev_err(dev, "module(%s) probe failed\n", module->name); + } + mutex_unlock(&netcp_modules_lock); return 0; probe_quit_interface: -- cgit v1.2.3 From 21f706bb10cf7d905d89ceb3f689b8b844520127 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 17 Apr 2018 17:30:40 -0400 Subject: net: netcp: support probe deferral The netcp driver shouldn't proceed until the knav qmss and dma devices are ready. So return -EPROBE_DEFER if these devices are not ready. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp_core.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 9c51b25d7fd0..736f6f713c09 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2158,6 +2158,10 @@ static int netcp_probe(struct platform_device *pdev) struct netcp_module *module; int ret; + if (!knav_dma_device_ready() || + !knav_qmss_device_ready()) + return -EPROBE_DEFER; + if (!node) { dev_err(dev, "could not find device info\n"); return -ENODEV; -- cgit v1.2.3 From 0542a87c546726510e7a572b16c5493ca42024c4 Mon Sep 17 00:00:00 2001 From: WingMan Kwok Date: Tue, 17 Apr 2018 17:30:41 -0400 Subject: net: netcp: add api to support set rx mode in netcp modules This patch adds an API to support setting rx mode in netcp modules. If a netcp module needs to be notified when upper layer transitions from one rx mode to another and react accordingly, such a module will implement the new API set_rx_mode added in this patch. Currently rx modes supported are PROMISCUOUS and NON_PROMISCUOUS modes. Signed-off-by: WingMan Kwok Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/netcp.h | 1 + drivers/net/ethernet/ti/netcp_core.c | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 416f732a00f7..c4ffdf47bad5 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -214,6 +214,7 @@ struct netcp_module { int (*add_vid)(void *intf_priv, int vid); int (*del_vid)(void *intf_priv, int vid); int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd); + int (*set_rx_mode)(void *intf_priv, bool promisc); /* used internally */ struct list_head module_list; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 736f6f713c09..e40aa3e31af2 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1509,6 +1509,24 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) } } +static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc) +{ + struct netcp_intf_modpriv *priv; + struct netcp_module *module; + int error; + + for_each_module(netcp, priv) { + module = priv->netcp_module; + if (!module->set_rx_mode) + continue; + + error = module->set_rx_mode(priv->module_priv, promisc); + if (error) + return error; + } + return 0; +} + static void netcp_set_rx_mode(struct net_device *ndev) { struct netcp_intf *netcp = netdev_priv(ndev); @@ -1538,6 +1556,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) /* finally sweep and callout into modules */ netcp_addr_sweep_del(netcp); netcp_addr_sweep_add(netcp); + netcp_set_promiscuous(netcp, promisc); spin_unlock(&netcp->lock); } -- cgit v1.2.3 From 8585661b1a4c6924c4c7e7abe9beec0bcf53eab7 Mon Sep 17 00:00:00 2001 From: WingMan Kwok Date: Tue, 17 Apr 2018 17:30:42 -0400 Subject: net: netcp: ethss: k2g: add promiscuous mode support This patch adds support for promiscuous mode in k2g's network driver. When upper layer instructs to transition from non-promiscuous mode to promiscuous mode or vice versa K2G network driver needs to configure ALE accordingly so that in case of non-promiscuous mode, ALE will not flood all unicast packets to host port, while in promiscuous mode, it will pass all received unicast packets to host port. 
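[Editor's note] The set_rx_mode hook added by the previous patch is opt-in per module: a module that needs to react to rx-mode changes implements the callback and plugs it into its struct netcp_module, which is what the following patch does for the K2G (gbe-2) switch at probe time. A minimal sketch of the pattern, with illustrative names (my_mod_*) that are not part of these patches:

    /* Hypothetical example module; only the rx-mode hook is shown.
     * The callback signature matches the one added to struct netcp_module.
     */
    static int my_mod_set_rx_mode(void *intf_priv, bool promisc)
    {
            /* reprogram the hardware address filtering for the new mode */
            return 0;
    }

    static struct netcp_module my_module = {
            .name           = "my-netcp-module",
            .set_rx_mode    = my_mod_set_rx_mode,
            /* .probe, .attach and the other ops are omitted here */
    };

netcp_set_promiscuous() then walks the attached modules and invokes each module's set_rx_mode callback, so modules that leave the hook unset are simply skipped.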
Signed-off-by: WingMan Kwok Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 56 +++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 5bc792175e4e..6a728d35e776 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2775,6 +2775,61 @@ static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req) } #endif /* CONFIG_TI_CPTS */ +static int gbe_set_rx_mode(void *intf_priv, bool promisc) +{ + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + struct cpsw_ale *ale = gbe_dev->ale; + unsigned long timeout; + int i, ret = -ETIMEDOUT; + + /* Disable(1)/Enable(0) Learn for all ports (host is port 0 and + * slaves are port 1 and up + */ + for (i = 0; i <= gbe_dev->num_slaves; i++) { + cpsw_ale_control_set(ale, i, + ALE_PORT_NOLEARN, !!promisc); + cpsw_ale_control_set(ale, i, + ALE_PORT_NO_SA_UPDATE, !!promisc); + } + + if (!promisc) { + /* Don't Flood All Unicast Packets to Host port */ + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); + dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n"); + return 0; + } + + timeout = jiffies + HZ; + + /* Clear All Untouched entries */ + cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); + do { + cpu_relax(); + if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) { + ret = 0; + break; + } + + } while (time_after(timeout, jiffies)); + + /* Make sure it is not a false timeout */ + if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) + return ret; + + cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); + + /* Clear all mcast from ALE */ + cpsw_ale_flush_multicast(ale, + GBE_PORT_MASK(gbe_dev->ale_ports), + -1); + + /* Flood All Unicast Packets to Host port */ + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); + dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n"); + return ret; +} + static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd) { struct gbe_intf *gbe_intf = intf_priv; @@ -3529,6 +3584,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, gbe_dev->max_num_slaves = 8; } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) { gbe_dev->max_num_slaves = 1; + gbe_module.set_rx_mode = gbe_set_rx_mode; } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) { gbe_dev->max_num_slaves = 2; } else { -- cgit v1.2.3 From 008b4a938d785159dc060da7aec06f66449a4ef8 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:17:13 +0200 Subject: r8169: remove unused member features from struct Member features of struct rtl8169_private isn't used any longer since commit 6c6aa15fdea5 "r8169: improve interrupt handling", so remove it. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7d4e68907055..5cd10ee62f20 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -832,8 +832,6 @@ struct rtl8169_private { struct work_struct work; } wk; - unsigned features; - struct mii_if_info mii; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; -- cgit v1.2.3 From 1cbbf01cfe251447d232ccd6b031252bc5fa4b80 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:18:05 +0200 Subject: r8169: remove member align from struct rtl_cfg_info Since commit 6f0333b8fde4 "r8169: use 50% less ram for RX ring" member align isn't used any longer, so remove it. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 5cd10ee62f20..14f494a9318a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -8016,7 +8016,6 @@ static const struct net_device_ops rtl_netdev_ops = { static const struct rtl_cfg_info { void (*hw_start)(struct net_device *); unsigned int region; - unsigned int align; u16 event_slow; unsigned int has_gmii:1; const struct rtl_coalesce_info *coalesce_info; @@ -8025,7 +8024,6 @@ static const struct rtl_cfg_info { [RTL_CFG_0] = { .hw_start = rtl_hw_start_8169, .region = 1, - .align = 0, .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8169, @@ -8034,7 +8032,6 @@ static const struct rtl_cfg_info { [RTL_CFG_1] = { .hw_start = rtl_hw_start_8168, .region = 2, - .align = 8, .event_slow = SYSErr | LinkChg | RxOverflow, .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8168_8136, @@ -8043,7 +8040,6 @@ static const struct rtl_cfg_info { [RTL_CFG_2] = { .hw_start = rtl_hw_start_8101, .region = 2, - .align = 8, .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | PCSTimeout, .coalesce_info = rtl_coalesce_info_8168_8136, -- cgit v1.2.3 From 8a67aa868c7a3e6ef92b4e355abd7a2836573aa9 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:19:07 +0200 Subject: r8169: use skb_copy_to_linear_data in rtl8169_try_rx_copy Not a giant leap for mankind, but let's avoid the open-coded memcpy and use standard helper skb_copy_to_linear_data instead. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 14f494a9318a..8c303617cc37 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7359,7 +7359,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data, prefetch(data); skb = napi_alloc_skb(&tp->napi, pkt_size); if (skb) - memcpy(skb->data, data, pkt_size); + skb_copy_to_linear_data(skb, data, pkt_size); dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); return skb; -- cgit v1.2.3 From 37621493b873b3701129333117da683b8e7a9da6 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:20:03 +0200 Subject: r8169: use constant NAPI_POLL_WEIGHT We can use the generic constant NAPI_POLL_WEIGHT instead of defining our own constant for the same value. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 8c303617cc37..cc88e8ee45c7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -88,7 +88,6 @@ static const int multicast_filter_limit = 32; #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ #define R8169_REGS_SIZE 256 -#define R8169_NAPI_WEIGHT 64 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) @@ -8316,7 +8315,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->ethtool_ops = &rtl8169_ethtool_ops; dev->watchdog_timeo = RTL8169_TX_TIMEOUT; - netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); + netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); /* don't enable SG, IP_CSUM and TSO by default - it might not work * properly for all devices */ -- cgit v1.2.3 From 9a899a35b0d6b28b31cb04e042a349ef2fe18557 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:21:01 +0200 Subject: r8169: switch to napi_schedule_irqoff napi_schedule() is called from hard irq context, so we can switch to napi_schedule_irqoff() and avoid some overhead. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index cc88e8ee45c7..34447a2e3763 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7473,7 +7473,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) handled = 1; rtl_irq_disable(tp); - napi_schedule(&tp->napi); + napi_schedule_irqoff(&tp->napi); } } return IRQ_RETVAL(handled); -- cgit v1.2.3 From d3b404c29c97ecf56001dc94f94a5b8e5efcd2e3 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:22:14 +0200 Subject: r8169: simplify rtl8169_alloc_rx_data dev->dev.parent has the same value as tp_to_dev(tp) (set by SET_NETDEV_DEV() in rtl_init_one()) and we know it can't be NULL. This allows us to simplify the code. Signed-off-by: Heiner Kallweit Signed-off-by: David S.
Miller --- drivers/net/ethernet/realtek/r8169.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 34447a2e3763..0216ca71d1aa 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6731,8 +6731,7 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, void *data; dma_addr_t mapping; struct device *d = tp_to_dev(tp); - struct net_device *dev = tp->dev; - int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; + int node = dev_to_node(d); data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); if (!data) -- cgit v1.2.3 From b1127e641e8986212620111ee5f78a24cbe8ae27 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:23:35 +0200 Subject: r8169: improve rtl8169_init_ring This function doesn't use the net_device, therefore change the parameter to type struct rtl8169_private * to simplify the code. In addition we don't need the calculations in the memset statements, we can use the size of the arrays directly. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0216ca71d1aa..3971d089ee89 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6803,14 +6803,12 @@ err_out: return -ENOMEM; } -static int rtl8169_init_ring(struct net_device *dev) +static int rtl8169_init_ring(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - rtl8169_init_ring_indexes(tp); - memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); - memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *)); + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); return rtl8169_rx_fill(tp); } @@ -7678,7 +7676,7 @@ static int rtl_open(struct net_device *dev) if (!tp->RxDescArray) goto err_free_tx_0; - retval = rtl8169_init_ring(dev); + retval = rtl8169_init_ring(tp); if (retval < 0) goto err_free_rx_1; -- cgit v1.2.3 From 1528192f8975dbe741de9f9d05c492f81ab7fa71 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:24:43 +0200 Subject: r8169: remove unneeded check in rtl8169_rx_fill rtl8169_rx_fill() is called only once and directly before the call array tp->Rx_databuff[] is filled with zero's. Therefore we don't need this check. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3971d089ee89..d2d0940e7253 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6784,9 +6784,6 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp) for (i = 0; i < NUM_RX_DESC; i++) { void *data; - if (tp->Rx_databuff[i]) - continue; - data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); if (!data) { rtl8169_make_unusable_by_asic(tp->RxDescArray + i); -- cgit v1.2.3 From 1d0254dd9b44dd187cbbdfe42f9b2cb6b4aa36a4 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:25:46 +0200 Subject: r8169: replace rx_buf_sz with a constant rx_buf_sz is constant, so we don't have to pass it as parameter and in general can replace it with a constant. When working on this I noticed that also before in rtl_set_rx_max_size() a value of 0x4000 is set, what is not in line with the chip spec. According to the spec only bits 0..13 are used and we set an effective value of zero therefore. However, the driver still seems to work and due to potential side effects I'm reluctant to make a change. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 37 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index d2d0940e7253..c68771d0e89b 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -88,6 +88,7 @@ static const int multicast_filter_limit = 32; #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ #define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) @@ -343,7 +344,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); -static int rx_buf_sz = 16383; static int use_dac = -1; static struct { u32 msg_enable; @@ -5385,10 +5385,10 @@ static u16 rtl_rw_cpluscmd(struct rtl8169_private *tp) return cmd; } -static void rtl_set_rx_max_size(struct rtl8169_private *tp, unsigned int rx_buf_sz) +static void rtl_set_rx_max_size(struct rtl8169_private *tp) { /* Low hurts. Let's disable the filtering. 
*/ - RTL_W16(tp, RxMaxSize, rx_buf_sz + 1); + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); } static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) @@ -5489,7 +5489,7 @@ static void rtl_hw_start_8169(struct net_device *dev) RTL_W8(tp, EarlyTxThres, NoEarlyTx); - rtl_set_rx_max_size(tp, rx_buf_sz); + rtl_set_rx_max_size(tp); if (tp->mac_version == RTL_GIGA_MAC_VER_01 || tp->mac_version == RTL_GIGA_MAC_VER_02 || @@ -6329,7 +6329,7 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - rtl_set_rx_max_size(tp, rx_buf_sz); + rtl_set_rx_max_size(tp); tp->cp_cmd |= RTL_R16(tp, CPlusCmd) | PktCntrDisable | INTT_1; @@ -6613,7 +6613,7 @@ static void rtl_hw_start_8101(struct net_device *dev) RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - rtl_set_rx_max_size(tp, rx_buf_sz); + rtl_set_rx_max_size(tp); tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); @@ -6695,29 +6695,28 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, void **data_buff, struct RxDesc *desc) { - dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), rx_buf_sz, - DMA_FROM_DEVICE); + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); kfree(*data_buff); *data_buff = NULL; rtl8169_make_unusable_by_asic(desc); } -static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz) +static inline void rtl8169_mark_to_asic(struct RxDesc *desc) { u32 eor = le32_to_cpu(desc->opts1) & RingEnd; /* Force memory writes to complete before releasing descriptor */ dma_wmb(); - desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); + desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE); } -static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, - u32 rx_buf_sz) +static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping) { desc->addr = cpu_to_le64(mapping); - rtl8169_mark_to_asic(desc, rx_buf_sz); + rtl8169_mark_to_asic(desc); } static inline void *rtl8169_align(void *data) @@ -6733,18 +6732,18 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, struct device *d = tp_to_dev(tp); int node = dev_to_node(d); - data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node); if (!data) return NULL; if (rtl8169_align(data) != data) { kfree(data); - data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node); + data = kmalloc_node(R8169_RX_BUF_SIZE + 15, GFP_KERNEL, node); if (!data) return NULL; } - mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz, + mapping = dma_map_single(d, rtl8169_align(data), R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(d, mapping))) { if (net_ratelimit()) @@ -6752,7 +6751,7 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, goto err_out; } - rtl8169_map_to_asic(desc, mapping, rx_buf_sz); + rtl8169_map_to_asic(desc, mapping); return data; err_out: @@ -6864,7 +6863,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) rtl8169_hw_reset(tp); for (i = 0; i < NUM_RX_DESC; i++) - rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); + rtl8169_mark_to_asic(tp->RxDescArray + i); rtl8169_tx_clear(tp); rtl8169_init_ring_indexes(tp); @@ -7444,7 +7443,7 @@ process_pkt: } release_descriptor: desc->opts2 = 0; - rtl8169_mark_to_asic(desc, rx_buf_sz); + rtl8169_mark_to_asic(desc); } count = cur_rx - tp->cur_rx; -- cgit v1.2.3 From 
d731af7822cc80ee2735ac2188023c95339c136b Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:26:41 +0200 Subject: r8169: remove rtl8169_map_to_asic This function is very simple and used only once, so we can inline the two statements. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index c68771d0e89b..347c61521e56 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6713,12 +6713,6 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc) desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE); } -static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping) -{ - desc->addr = cpu_to_le64(mapping); - rtl8169_mark_to_asic(desc); -} - static inline void *rtl8169_align(void *data) { return (void *)ALIGN((long)data, 16); @@ -6751,7 +6745,8 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, goto err_out; } - rtl8169_map_to_asic(desc, mapping); + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); return data; err_out: -- cgit v1.2.3 From 61cb532d5224b388145582dbe40cb0ae7a57c7e6 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:27:38 +0200 Subject: r8169: change hw_start argument type Code can be simplified by changing the argument type of hw_start callbacks from struct net_device * to struct rtl8169_private *. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 41 +++++++++++++----------------------- 1 file changed, 15 insertions(+), 26 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 347c61521e56..7a513b97e1e5 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -819,7 +819,7 @@ struct rtl8169_private { int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); void (*phy_reset_enable)(struct rtl8169_private *tp); - void (*hw_start)(struct net_device *); + void (*hw_start)(struct rtl8169_private *tp); unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); unsigned int (*link_ok)(struct rtl8169_private *tp); int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); @@ -5354,12 +5354,9 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) (InterFrameGap << TxInterFrameGapShift)); } -static void rtl_hw_start(struct net_device *dev) +static void rtl_hw_start(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - - tp->hw_start(dev); - + tp->hw_start(tp); rtl_irq_enable_all(tp); } @@ -5468,14 +5465,11 @@ static void rtl_set_rx_mode(struct net_device *dev) RTL_W32(tp, RxConfig, tmp); } -static void rtl_hw_start_8169(struct net_device *dev) +static void rtl_hw_start_8169(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - if (tp->mac_version == RTL_GIGA_MAC_VER_05) { RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) | PCIMulRW); - pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); } RTL_W8(tp, Cfg9346, Cfg9346_Unlock); @@ -5533,7 +5527,7 @@ static void rtl_hw_start_8169(struct net_device *dev) RTL_W32(tp, RxMissed, 
0); - rtl_set_rx_mode(dev); + rtl_set_rx_mode(tp->dev); /* no early-rx interrupts */ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); @@ -6321,10 +6315,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) r8168_mac_ocp_write(tp, 0xe860, data); } -static void rtl_hw_start_8168(struct net_device *dev) +static void rtl_hw_start_8168(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); RTL_W8(tp, MaxTxPacketSize, TxPacketMax); @@ -6449,7 +6441,7 @@ static void rtl_hw_start_8168(struct net_device *dev) default: printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", - dev->name, tp->mac_version); + tp->dev->name, tp->mac_version); break; } @@ -6457,7 +6449,7 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_set_rx_mode(dev); + rtl_set_rx_mode(tp->dev); RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } @@ -6596,17 +6588,14 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) rtl_pcie_state_l2l3_enable(tp, false); } -static void rtl_hw_start_8101(struct net_device *dev) +static void rtl_hw_start_8101(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - if (tp->mac_version >= RTL_GIGA_MAC_VER_30) tp->event_slow &= ~RxFIFOOver; if (tp->mac_version == RTL_GIGA_MAC_VER_13 || tp->mac_version == RTL_GIGA_MAC_VER_16) - pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, + pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_NOSNOOP_EN); RTL_W8(tp, Cfg9346, Cfg9346_Unlock); @@ -6664,7 +6653,7 @@ static void rtl_hw_start_8101(struct net_device *dev) RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_set_rx_mode(dev); + rtl_set_rx_mode(tp->dev); RTL_R8(tp, IntrMask); @@ -6864,7 +6853,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) rtl8169_init_ring_indexes(tp); napi_enable(&tp->napi); - rtl_hw_start(dev); + rtl_hw_start(tp); netif_wake_queue(dev); rtl8169_check_link_status(dev, tp); } @@ -7694,7 +7683,7 @@ static int rtl_open(struct net_device *dev) rtl_pll_power_up(tp); - rtl_hw_start(dev); + rtl_hw_start(tp); if (!rtl8169_init_counter_offsets(dev)) netif_warn(tp, hw, dev, "counter reset/update failed\n"); @@ -8001,7 +7990,7 @@ static const struct net_device_ops rtl_netdev_ops = { }; static const struct rtl_cfg_info { - void (*hw_start)(struct net_device *); + void (*hw_start)(struct rtl8169_private *tp); unsigned int region; u16 event_slow; unsigned int has_gmii:1; -- cgit v1.2.3 From e71c9ce262269ed12d042ea56da63a51c97fd880 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:28:28 +0200 Subject: r8169: change argument type of counters handling functions The counter handling functions don't deal with the net_device, so code can be simplified by changing the argument type to struct rtl8169_private *. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7a513b97e1e5..7ee7b67b7a08 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2151,9 +2151,8 @@ DECLARE_RTL_COND(rtl_counters_cond) return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); } -static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) +static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) { - struct rtl8169_private *tp = netdev_priv(dev); dma_addr_t paddr = tp->counters_phys_addr; u32 cmd; @@ -2166,10 +2165,8 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); } -static bool rtl8169_reset_counters(struct net_device *dev) +static bool rtl8169_reset_counters(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - /* * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the * tally counters. @@ -2177,13 +2174,11 @@ static bool rtl8169_reset_counters(struct net_device *dev) if (tp->mac_version < RTL_GIGA_MAC_VER_19) return true; - return rtl8169_do_counters(dev, CounterReset); + return rtl8169_do_counters(tp, CounterReset); } -static bool rtl8169_update_counters(struct net_device *dev) +static bool rtl8169_update_counters(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - /* * Some chips are unable to dump tally counters when the receiver * is disabled. @@ -2191,12 +2186,11 @@ static bool rtl8169_update_counters(struct net_device *dev) if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) return true; - return rtl8169_do_counters(dev, CounterDump); + return rtl8169_do_counters(tp, CounterDump); } -static bool rtl8169_init_counter_offsets(struct net_device *dev) +static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); struct rtl8169_counters *counters = tp->counters; bool ret = false; @@ -2219,10 +2213,10 @@ static bool rtl8169_init_counter_offsets(struct net_device *dev) return true; /* If both, reset and update fail, propagate to caller. */ - if (rtl8169_reset_counters(dev)) + if (rtl8169_reset_counters(tp)) ret = true; - if (rtl8169_update_counters(dev)) + if (rtl8169_update_counters(tp)) ret = true; tp->tc_offset.tx_errors = counters->tx_errors; @@ -2245,7 +2239,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev, pm_runtime_get_noresume(d); if (pm_runtime_active(d)) - rtl8169_update_counters(dev); + rtl8169_update_counters(tp); pm_runtime_put_noidle(d); @@ -7601,7 +7595,7 @@ static int rtl8169_close(struct net_device *dev) pm_runtime_get_sync(&pdev->dev); /* Update counters before going down */ - rtl8169_update_counters(dev); + rtl8169_update_counters(tp); rtl_lock_work(tp); clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); @@ -7685,7 +7679,7 @@ static int rtl_open(struct net_device *dev) rtl_hw_start(tp); - if (!rtl8169_init_counter_offsets(dev)) + if (!rtl8169_init_counter_offsets(tp)) netif_warn(tp, hw, dev, "counter reset/update failed\n"); netif_start_queue(dev); @@ -7754,7 +7748,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) * from tally counters. 
*/ if (pm_runtime_active(&pdev->dev)) - rtl8169_update_counters(dev); + rtl8169_update_counters(tp); /* * Subtract values fetched during initalization. @@ -7850,7 +7844,7 @@ static int rtl8169_runtime_suspend(struct device *device) /* Update counters before going runtime suspend */ rtl8169_rx_missed(dev); - rtl8169_update_counters(dev); + rtl8169_update_counters(tp); return 0; } -- cgit v1.2.3 From ebcd5daa7ffd6e8be53401bfce5143a26543afe5 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:29:20 +0200 Subject: r8169: change interrupt handler argument type Code can be a little simplified by switching the interrupt handler argument type to struct rtl8169_private *. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7ee7b67b7a08..ead853ac63de 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7432,8 +7432,7 @@ release_descriptor: static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { - struct net_device *dev = dev_instance; - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = dev_instance; int handled = 0; u16 status; @@ -7605,7 +7604,7 @@ static int rtl8169_close(struct net_device *dev) cancel_work_sync(&tp->wk.work); - pci_free_irq(pdev, 0, dev); + pci_free_irq(pdev, 0, tp); dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); @@ -7660,7 +7659,7 @@ static int rtl_open(struct net_device *dev) rtl_request_firmware(tp); - retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, dev, + retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp, dev->name); if (retval < 0) goto err_release_fw_2; -- cgit v1.2.3 From 6202806e7c03a116a973f1a5ed770584768129f4 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:30:29 +0200 Subject: r8169: drop member opts1_mask from struct rtl8169_private We can get rid of member opts1_mask and in addition save a few cpu cycles in the hot path of rtl_rx(). Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ead853ac63de..fe838112143a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -836,7 +836,6 @@ struct rtl8169_private { struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; u32 saved_wolopts; - u32 opts1_mask; struct rtl_fw { const struct firmware *fw; @@ -7347,7 +7346,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget struct RxDesc *desc = tp->RxDescArray + entry; u32 status; - status = le32_to_cpu(desc->opts1) & tp->opts1_mask; + status = le32_to_cpu(desc->opts1); if (status & DescOwn) break; @@ -7365,14 +7364,16 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget dev->stats.rx_length_errors++; if (status & RxCRC) dev->stats.rx_crc_errors++; - if (status & RxFOVF) { + /* RxFOVF is a reserved bit on later chip versions */ + if (tp->mac_version == RTL_GIGA_MAC_VER_01 && + status & RxFOVF) { rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); dev->stats.rx_fifo_errors++; - } - if ((status & (RxRUNT | RxCRC)) && - !(status & (RxRWT | RxFOVF)) && - (dev->features & NETIF_F_RXALL)) + } else if (status & (RxRUNT | RxCRC) && + !(status & RxRWT) && + dev->features & NETIF_F_RXALL) { goto process_pkt; + } } else { struct sk_buff *skb; dma_addr_t addr; @@ -8327,9 +8328,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->event_slow = cfg->event_slow; tp->coalesce_info = cfg->coalesce_info; - tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? - ~(RxBOVF | RxFOVF) : ~0; - timer_setup(&tp->timer, rtl8169_phy_timer, 0); tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; -- cgit v1.2.3 From 2d6c5a61fad6c0f0092a333188ded27416471c5e Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:31:21 +0200 Subject: r8169: don't display tp->mmio_addr address For security reasons since commit ad67b74d2469 "printk: hash addresses printed with %p" %p doesn't display the full address any longer. We could switch to %px, but I think the pointer address doesn't provide a real benefit, so remove printing the hashed address. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index fe838112143a..f81abbe2858d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -8344,8 +8344,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc < 0) return rc; - netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", - rtl_chip_infos[chipset].name, tp->mmio_addr, dev->dev_addr, + netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, (u32)(RTL_R32(tp, TxConfig) & 0x9cf0f8ff), pci_irq_vector(pdev, 0)); if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { -- cgit v1.2.3 From 90b989c5532190c6b30a0a2c6dc38beae6578b9c Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:32:15 +0200 Subject: r8169: improve rtl8169_get_mac_version Certain entries in array mac_info[] are redundant, so remove them: 0x7cf, 0x2c200000 (VER 33): matched by entry 0x7c8, 0x2c000000 0x7cf, 0x28300000 (VER 26): matched by entry 0x7c8, 0x28000000 0x7cf, 0x3cb00000 (VER 24): matched by entry 0x7c8, 0x3c800000 0x7cf, 0x3c400000 (VER 22): matched by entry 0x7c8, 0x3c000000 0x7cf, 0x38500000 (VER 17): matched by entry 0x7c8, 0x38000000 0x7cf, 0x44900000 (VER 39): matched by entry 0x7c8, 0x44800000 0x7cf, 0x40b00000 (VER 30): matched by entry 0x7c8, 0x40800000 0x7cf, 0x40a00000 (VER 30): matched by entry 0x7c8, 0x40800000 0x7cf, 0x34a00000 (VER 09): matched by entry 0x7c8, 0x34800000 0x7cf, 0x24a00000 (VER 09): matched by entry 0x7c8, 0x24800000 In addition don't mask out bits 30 and 29 when printing the XID. Most likely this is a relict from the times when the driver covered RTL8169 chip version only. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index f81abbe2858d..dbb7ba2c357d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2549,12 +2549,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, /* 8168E family. */ { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, - { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 }, { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, /* 8168D family. */ - { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, @@ -2564,32 +2562,24 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, /* 8168C family. */ - { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, - { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 }, { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, /* 8168B family. 
*/ { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, - { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 }, { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, /* 8101 family. */ - { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 }, { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 }, { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, - { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, - { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, - { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, - { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, @@ -8346,7 +8336,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n", rtl_chip_infos[chipset].name, dev->dev_addr, - (u32)(RTL_R32(tp, TxConfig) & 0x9cf0f8ff), + (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), pci_irq_vector(pdev, 0)); if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " -- cgit v1.2.3 From a4328ddb55f27b7ed0ec0aa7e9509869bbbbf097 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:33:03 +0200 Subject: r8169: drop member txd_version from struct rtl8169_private txd_version is used in rtl_init_one() only, so we can drop member txd_version from struct rtl8169_private. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index dbb7ba2c357d..ccc57683ed1b 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -776,7 +776,6 @@ struct rtl8169_private { struct net_device *dev; struct napi_struct napi; u32 msg_enable; - u16 txd_version; u16 mac_version; u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ @@ -8214,7 +8213,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl8169_print_mac_version(tp); chipset = tp->mac_version; - tp->txd_version = rtl_chip_infos[chipset].txd_version; rc = rtl_alloc_irq(tp); if (rc < 0) { @@ -8299,13 +8297,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Disallow toggling */ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; - if (tp->txd_version == RTL_TD_0) + switch (rtl_chip_infos[chipset].txd_version) { + case RTL_TD_0: tp->tso_csum = rtl8169_tso_csum_v1; - else if (tp->txd_version == RTL_TD_1) { + break; + case RTL_TD_1: tp->tso_csum = rtl8169_tso_csum_v2; dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; - } else + break; + default: WARN_ON_ONCE(1); + } dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; -- cgit v1.2.3 From c8d48d9c5bf7a0e884c17867eb8e0b30b7eae139 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:34:22 +0200 Subject: r8169: improve pci region handling The region to be used is always the first of type IORESOURCE_MEM. We can implement this rule directly w/o having to specify which region is the first one per configuration entry. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ccc57683ed1b..94e91d3c2233 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7974,7 +7974,6 @@ static const struct net_device_ops rtl_netdev_ops = { static const struct rtl_cfg_info { void (*hw_start)(struct rtl8169_private *tp); - unsigned int region; u16 event_slow; unsigned int has_gmii:1; const struct rtl_coalesce_info *coalesce_info; @@ -7982,7 +7981,6 @@ static const struct rtl_cfg_info { } rtl_cfg_infos [] = { [RTL_CFG_0] = { .hw_start = rtl_hw_start_8169, - .region = 1, .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8169, @@ -7990,7 +7988,6 @@ static const struct rtl_cfg_info { }, [RTL_CFG_1] = { .hw_start = rtl_hw_start_8168, - .region = 2, .event_slow = SYSErr | LinkChg | RxOverflow, .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8168_8136, @@ -7998,7 +7995,6 @@ static const struct rtl_cfg_info { }, [RTL_CFG_2] = { .hw_start = rtl_hw_start_8101, - .region = 2, .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | PCSTimeout, .coalesce_info = rtl_coalesce_info_8168_8136, @@ -8098,11 +8094,10 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; - const unsigned int region = cfg->region; struct rtl8169_private *tp; struct mii_if_info *mii; struct net_device *dev; - int chipset, i; + int chipset, region, i; int rc; if (netif_msg_drv(&debug)) { @@ -8144,11 +8139,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pcim_set_mwi(pdev) < 0) netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); - /* make sure PCI base addr 1 is MMIO */ - if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { - netif_err(tp, probe, dev, - "region #%d not an MMIO resource, aborting\n", - region); + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + netif_err(tp, probe, dev, "no MMIO resource found\n"); return -ENODEV; } -- cgit v1.2.3 From 6ed0e08f9e2a90ab455d897e4679b903c82dac67 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Apr 2018 23:36:12 +0200 Subject: r8169: remove jumbo_tx_csum from chip config struct According to the chip configuration entries only RTL8169 (ver <= 06) supports tx checksumming for jumbo packets. By the way: constant JUMBO_1K is a little misleading because it refers to the standard packet size and not to a jumbo packet size. By implementing this rule we can get rid of configuring tx checksumming support per chip type. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 133 ++++++++++++++--------------------- 1 file changed, 54 insertions(+), 79 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 94e91d3c2233..fcd42d0387b9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -171,12 +171,11 @@ enum rtl_tx_desc_version { #define JUMBO_7K (7*1024 - ETH_HLEN - 2) #define JUMBO_9K (9*1024 - ETH_HLEN - 2) -#define _R(NAME,TD,FW,SZ,B) { \ +#define _R(NAME,TD,FW,SZ) { \ .name = NAME, \ .txd_version = TD, \ .fw_name = FW, \ .jumbo_max = SZ, \ - .jumbo_tx_csum = B \ } static const struct { @@ -184,135 +183,111 @@ static const struct { enum rtl_tx_desc_version txd_version; const char *fw_name; u16 jumbo_max; - bool jumbo_tx_csum; } rtl_chip_infos[] = { /* PCI devices. */ [RTL_GIGA_MAC_VER_01] = - _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_02] = - _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_03] = - _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_04] = - _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_05] = - _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_06] = - _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K), /* PCI-E devices. */ [RTL_GIGA_MAC_VER_07] = - _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_08] = - _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_09] = - _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_10] = - _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_11] = - _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K), [RTL_GIGA_MAC_VER_12] = - _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K), [RTL_GIGA_MAC_VER_13] = - _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_14] = - _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_15] = - _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_16] = - _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_17] = - _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K), [RTL_GIGA_MAC_VER_18] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_19] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_20] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_21] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_22] = - 
_R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_23] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_24] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_25] = - _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, - JUMBO_9K, false), + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, JUMBO_9K), [RTL_GIGA_MAC_VER_26] = - _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, - JUMBO_9K, false), + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, JUMBO_9K), [RTL_GIGA_MAC_VER_27] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_28] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_29] = - _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, - JUMBO_1K, true), + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_30] = - _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, - JUMBO_1K, true), + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_31] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_32] = - _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, - JUMBO_9K, false), + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, JUMBO_9K), [RTL_GIGA_MAC_VER_33] = - _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, - JUMBO_9K, false), + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, JUMBO_9K), [RTL_GIGA_MAC_VER_34] = - _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, - JUMBO_9K, false), + _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, JUMBO_9K), [RTL_GIGA_MAC_VER_35] = - _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, - JUMBO_9K, false), + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, JUMBO_9K), [RTL_GIGA_MAC_VER_36] = - _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, - JUMBO_9K, false), + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K), [RTL_GIGA_MAC_VER_37] = - _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, - JUMBO_1K, true), + _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, JUMBO_1K), [RTL_GIGA_MAC_VER_38] = - _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, - JUMBO_9K, false), + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, JUMBO_9K), [RTL_GIGA_MAC_VER_39] = - _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, - JUMBO_1K, true), + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_40] = - _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, - JUMBO_9K, false), + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, JUMBO_9K), [RTL_GIGA_MAC_VER_41] = - _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false), + _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_42] = - _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, - JUMBO_9K, false), + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, JUMBO_9K), [RTL_GIGA_MAC_VER_43] = - _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, - JUMBO_1K, true), + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, JUMBO_1K), [RTL_GIGA_MAC_VER_44] = - _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, - JUMBO_9K, false), + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, JUMBO_9K), [RTL_GIGA_MAC_VER_45] = - _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, - JUMBO_9K, false), + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, JUMBO_9K), [RTL_GIGA_MAC_VER_46] = - _R("RTL8168h/8111h", RTL_TD_1, 
FIRMWARE_8168H_2, - JUMBO_9K, false), + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, JUMBO_9K), [RTL_GIGA_MAC_VER_47] = - _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, - JUMBO_1K, false), + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_48] = - _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, - JUMBO_1K, false), + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, JUMBO_1K), [RTL_GIGA_MAC_VER_49] = - _R("RTL8168ep/8111ep", RTL_TD_1, NULL, - JUMBO_9K, false), + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_50] = - _R("RTL8168ep/8111ep", RTL_TD_1, NULL, - JUMBO_9K, false), + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_51] = - _R("RTL8168ep/8111ep", RTL_TD_1, NULL, - JUMBO_9K, false), + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K), }; #undef _R @@ -1954,7 +1929,7 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev, features &= ~NETIF_F_ALL_TSO; if (dev->mtu > JUMBO_1K && - !rtl_chip_infos[tp->mac_version].jumbo_tx_csum) + tp->mac_version > RTL_GIGA_MAC_VER_06) features &= ~NETIF_F_IP_CSUM; return features; @@ -8338,7 +8313,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " "tx checksumming: %s]\n", rtl_chip_infos[chipset].jumbo_max, - rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko"); + tp->mac_version <= RTL_GIGA_MAC_VER_06 ? "ok" : "ko"); } if (r8168_check_dash(tp)) -- cgit v1.2.3 From 0fe554a46a0ff855376053c7e4204673b7879f05 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 17 Apr 2018 14:25:30 -0700 Subject: hv_netvsc: propogate Hyper-V friendly name into interface alias This patch implement the 'Device Naming' feature of the Hyper-V network device API. In Hyper-V on the host through the GUI or PowerShell it is possible to enable the device naming feature which causes the host to make available to the guest the name of the device. This shows up in the RNDIS protocol as the friendly name. The name has no particular meaning and is limited to 256 characters. The value can only be set via PowerShell on the host, but could be scripted for mass deployments. The default value is the string 'Network Adapter' and since that is the same for all devices and useless, the driver ignores it. In Windows, the value goes into a registry key for use in SNMP ifAlias. For Linux, this patch puts the value in the network device alias property; where it is visible in ip tools and SNMP. The host provided ifAlias is just a suggestion, and can be overridden by later ip commands. Also requires exporting dev_set_alias in netdev core. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/rndis_filter.c | 28 ++++++++++++++++++++++++++++ net/core/dev.c | 1 + 2 files changed, 29 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 6b127be781d9..3b6dbacaf77d 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "hyperv_net.h" #include "netvsc_trace.h" @@ -1223,6 +1224,29 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, return ret; } +static void rndis_get_friendly_name(struct net_device *net, + struct rndis_device *rndis_device, + struct netvsc_device *net_device) +{ + ucs2_char_t wname[256]; + unsigned long len; + u8 ifalias[256]; + u32 size; + + size = sizeof(wname); + if (rndis_filter_query_device(rndis_device, net_device, + RNDIS_OID_GEN_FRIENDLY_NAME, + wname, &size) != 0) + return; + + /* Convert Windows Unicode string to UTF-8 */ + len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias)); + + /* ignore the default value from host */ + if (strcmp(ifalias, "Network Adapter") != 0) + dev_set_alias(net, ifalias, len); +} + struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *device_info) { @@ -1276,6 +1300,10 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); + /* Get friendly name as ifalias*/ + if (!net->ifalias) + rndis_get_friendly_name(net, rndis_device, net_device); + /* Query and set hardware capabilities */ ret = rndis_netdev_set_hwcaps(rndis_device, net_device); if (ret != 0) diff --git a/net/core/dev.c b/net/core/dev.c index 969462ebb296..a490ef643586 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1285,6 +1285,7 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) return len; } +EXPORT_SYMBOL(dev_set_alias); /** * dev_get_alias - get ifalias of a device -- cgit v1.2.3 From 0dcec221dd39d4d48fe6b22b4cacf2f49b95ffa0 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Tue, 17 Apr 2018 15:31:47 -0700 Subject: hv_netvsc: Add NetVSP v6 and v6.1 into version negotiation This patch adds the NetVSP v6 and 6.1 message structures, and includes these versions into NetVSC/NetVSP version negotiation process. Signed-off-by: Haiyang Zhang Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 164 ++++++++++++++++++++++++++++++++++++++++ drivers/net/hyperv/netvsc.c | 3 +- 2 files changed, 166 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 960f06141472..6ebe39a3dde6 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -237,6 +237,8 @@ void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); #define NVSP_PROTOCOL_VERSION_2 0x30002 #define NVSP_PROTOCOL_VERSION_4 0x40000 #define NVSP_PROTOCOL_VERSION_5 0x50000 +#define NVSP_PROTOCOL_VERSION_6 0x60000 +#define NVSP_PROTOCOL_VERSION_61 0x60001 enum { NVSP_MSG_TYPE_NONE = 0, @@ -308,6 +310,12 @@ enum { NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE, NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE, + + /* Version 6 messages */ + NVSP_MSG6_TYPE_PD_API, + NVSP_MSG6_TYPE_PD_POST_BATCH, + + NVSP_MSG6_MAX = NVSP_MSG6_TYPE_PD_POST_BATCH }; enum { @@ -619,12 +627,168 @@ union nvsp_5_message_uber { struct nvsp_5_send_indirect_table send_table; } __packed; +enum nvsp_6_pd_api_op { + PD_API_OP_CONFIG = 1, + PD_API_OP_SW_DATAPATH, /* Switch Datapath */ + PD_API_OP_OPEN_PROVIDER, + PD_API_OP_CLOSE_PROVIDER, + PD_API_OP_CREATE_QUEUE, + PD_API_OP_FLUSH_QUEUE, + PD_API_OP_FREE_QUEUE, + PD_API_OP_ALLOC_COM_BUF, /* Allocate Common Buffer */ + PD_API_OP_FREE_COM_BUF, /* Free Common Buffer */ + PD_API_OP_MAX +}; + +struct grp_affinity { + u64 mask; + u16 grp; + u16 reserved[3]; +} __packed; + +struct nvsp_6_pd_api_req { + u32 op; + + union { + /* MMIO information is sent from the VM to VSP */ + struct __packed { + u64 mmio_pa; /* MMIO Physical Address */ + u32 mmio_len; + + /* Number of PD queues a VM can support */ + u16 num_subchn; + } config; + + /* Switch Datapath */ + struct __packed { + /* Host Datapath Is PacketDirect */ + u8 host_dpath_is_pd; + + /* Guest PacketDirect Is Enabled */ + u8 guest_pd_enabled; + } sw_dpath; + + /* Open Provider*/ + struct __packed { + u32 prov_id; /* Provider id */ + u32 flag; + } open_prov; + + /* Close Provider */ + struct __packed { + u32 prov_id; + } cls_prov; + + /* Create Queue*/ + struct __packed { + u32 prov_id; + u16 q_id; + u16 q_size; + u8 is_recv_q; + u8 is_rss_q; + u32 recv_data_len; + struct grp_affinity affy; + } cr_q; + + /* Delete Queue*/ + struct __packed { + u32 prov_id; + u16 q_id; + } del_q; + + /* Flush Queue */ + struct __packed { + u32 prov_id; + u16 q_id; + } flush_q; + + /* Allocate Common Buffer */ + struct __packed { + u32 len; + u32 pf_node; /* Preferred Node */ + u16 region_id; + } alloc_com_buf; + + /* Free Common Buffer */ + struct __packed { + u32 len; + u64 pa; /* Physical Address */ + u32 pf_node; /* Preferred Node */ + u16 region_id; + u8 cache_type; + } free_com_buf; + } __packed; +} __packed; + +struct nvsp_6_pd_api_comp { + u32 op; + u32 status; + + union { + struct __packed { + /* actual number of PD queues allocated to the VM */ + u16 num_pd_q; + + /* Num Receive Rss PD Queues */ + u8 num_rss_q; + + u8 is_supported; /* Is supported by VSP */ + u8 is_enabled; /* Is enabled by VSP */ + } config; + + /* Open Provider */ + struct __packed { + u32 prov_id; + } open_prov; + + /* Create Queue */ + struct __packed { + u32 prov_id; + u16 q_id; + u16 q_size; + u32 recv_data_len; + struct grp_affinity affy; + } cr_q; + + /* Allocate Common Buffer */ + struct __packed { + u64 pa; /* Physical Address */ + u32 len; + u32 pf_node; /* Preferred Node */ + u16 region_id; + u8 cache_type; + } alloc_com_buf; + } __packed; +} 
__packed; + +struct nvsp_6_pd_buf { + u32 region_offset; + u16 region_id; + u16 is_partial:1; + u16 reserved:15; +} __packed; + +struct nvsp_6_pd_batch_msg { + struct nvsp_message_header hdr; + u16 count; + u16 guest2host:1; + u16 is_recv:1; + u16 reserved:14; + struct nvsp_6_pd_buf pd_buf[0]; +} __packed; + +union nvsp_6_message_uber { + struct nvsp_6_pd_api_req pd_req; + struct nvsp_6_pd_api_comp pd_comp; +} __packed; + union nvsp_all_messages { union nvsp_message_init_uber init_msg; union nvsp_1_message_uber v1_msg; union nvsp_2_message_uber v2_msg; union nvsp_4_message_uber v4_msg; union nvsp_5_message_uber v5_msg; + union nvsp_6_message_uber v6_msg; } __packed; /* ALL Messages */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 04f611e6f678..e7308958b7a9 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -525,7 +525,8 @@ static int netvsc_connect_vsp(struct hv_device *device, struct net_device *ndev = hv_get_drvdata(device); static const u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, - NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 + NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5, + NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61 }; struct nvsp_message *init_packet; int ndis_version, i, ret; -- cgit v1.2.3 From e4a2eb8995365858f0dad6985b119313f0a4624f Mon Sep 17 00:00:00 2001 From: Bjoern Johansson Date: Wed, 11 Apr 2018 11:41:56 -0700 Subject: mac80211_hwsim: indicate support for powersave. Without this, higher layers in the kernel will return an error code when trying to set the power state because the driver doesn't indicate power state support. This in turn causes VTS (Android Vendor Test Suite) failures because the WiFi HAL can't enable power saving mode. Signed-off-by: Bjoern Johansson Signed-off-by: Lingfeng Yang Signed-off-by: Roman Kiryanov [johannes: remove remaining code, it was useless even as a skeleton since it didn't even have the right function arguments] Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 96d26cfae90b..7bfbc5916e9a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2650,6 +2650,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, TDLS_WIDER_BW); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); -- cgit v1.2.3 From 235b9c42766129b2534d1dd4021e04d39128901a Mon Sep 17 00:00:00 2001 From: Venkateswara Naralasetty Date: Tue, 10 Apr 2018 18:01:05 +0300 Subject: ath10k: Add tx ack signal support for management frames This patch add support to get RSSI from acknowledgment frames for transmitted management frames. hardware_used: QCA4019, QCA9984. firmware version: 10.4-3.5.3-00052. 
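For reference, when the firmware advertises WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, the RSSI carried in the HTT management tx completion is offset against the chip noise floor before being reported to mac80211 as ack_signal. A minimal sketch of that conversion, using the constants from the diff below (the helper name itself is illustrative, not part of the driver):

    /* Illustrative sketch only: firmware reports ack RSSI in dB above the
     * chip noise floor; the driver adds ATH10K_DEFAULT_NOISE_FLOOR (-95 dBm)
     * to obtain the dBm value passed to mac80211.
     */
    static int example_ack_signal_dbm(u8 ack_rssi)
    {
    	return ATH10K_DEFAULT_NOISE_FLOOR + ack_rssi;	/* e.g. -95 + 40 = -55 dBm */
    }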
Signed-off-by: Venkateswara Naralasetty Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 2 ++ drivers/net/wireless/ath/ath10k/htt.h | 10 +++++++++- drivers/net/wireless/ath/ath10k/htt_rx.c | 10 ++++++++++ drivers/net/wireless/ath/ath10k/txrx.c | 8 ++++++++ drivers/net/wireless/ath/ath10k/wmi.h | 7 +++++++ 5 files changed, 36 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index c17d805d68cc..e4ac8f2831fd 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -52,6 +52,8 @@ /* Antenna noise floor */ #define ATH10K_DEFAULT_NOISE_FLOOR -95 +#define ATH10K_INVALID_RSSI 128 + #define ATH10K_MAX_NUM_MGMT_PENDING 128 /* number of failed packets (20 packets with 16 sw reties each) */ diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 8cc2a8b278e4..e5dbb0bdd787 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -533,12 +534,18 @@ struct htt_ver_resp { u8 rsvd0; } __packed; +#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0) + +#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0) + struct htt_mgmt_tx_completion { u8 rsvd0; u8 rsvd1; - u8 rsvd2; + u8 flags; __le32 desc_id; __le32 status; + __le32 ppdu_id; + __le32 info; } __packed; #define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F) @@ -1648,6 +1655,7 @@ struct htt_resp { struct htt_tx_done { u16 msdu_id; u16 status; + u8 ack_rssi; }; enum htt_tx_compl_state { diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 5e02e26158f6..0315bdad7f85 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -25,6 +25,7 @@ #include "mac.h" #include +#include /* when under memory pressure rx ring refill may fail and needs a retry */ #define HTT_RX_RING_REFILL_RETRY_MS 50 @@ -2719,12 +2720,21 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { struct htt_tx_done tx_done = {}; int status = __le32_to_cpu(resp->mgmt_tx_completion.status); + int info = __le32_to_cpu(resp->mgmt_tx_completion.info); tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); switch (status) { case HTT_MGMT_TX_STATUS_OK: tx_done.status = HTT_TX_COMPL_STATE_ACK; + if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + ar->wmi.svc_map) && + (resp->mgmt_tx_completion.flags & + HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) { + tx_done.ack_rssi = + FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK, + info); + } break; case HTT_MGMT_TX_STATUS_RETRY: tx_done.status = HTT_TX_COMPL_STATE_NOACK; diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 70e23bbf7171..cda164f6e9f6 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -119,6 +120,13 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, info->flags &= ~IEEE80211_TX_STAT_ACK; } + if (tx_done->status == HTT_TX_COMPL_STATE_ACK && + tx_done->ack_rssi != ATH10K_INVALID_RSSI) { + info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR + + tx_done->ack_rssi; + info->status.is_valid_ack_signal = true; + } + ieee80211_tx_status(htt->ar->hw, msdu); /* we do not own the msdu anymore */ diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 6fbc84c29521..3cc129d812a4 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -201,6 +201,7 @@ enum wmi_service { WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, WMI_SERVICE_TPC_STATS_FINAL, + WMI_SERVICE_RESET_CHIP, /* keep last */ WMI_SERVICE_MAX, @@ -238,6 +239,8 @@ enum wmi_10x_service { WMI_10X_SERVICE_MESH, WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, WMI_10X_SERVICE_PEER_STATS, + WMI_10X_SERVICE_RESET_CHIP, + WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, }; enum wmi_main_service { @@ -548,6 +551,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); SVCMAP(WMI_10X_SERVICE_PEER_STATS, WMI_SERVICE_PEER_STATS, len); + SVCMAP(WMI_10X_SERVICE_RESET_CHIP, + WMI_SERVICE_RESET_CHIP, len); + SVCMAP(WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len); } static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, -- cgit v1.2.3 From a0aedd6e0b5755a4c1cf288744a0fbef667f9f8f Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:11 +0300 Subject: ath10k: build ce layer in ath10k core module CE layer is shared between pci and snoc target and results in duplicate object inclusion if both modules are compiled together statically and undefined KBUILD_MODNAME if compiled as module. Fix this by building ce layer in ath10k core module by adding ce object inclusion with ATH10K_CE boolean CONFIG. Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/Kconfig | 4 ++++ drivers/net/wireless/ath/ath10k/Makefile | 4 ++-- drivers/net/wireless/ath/ath10k/ce.c | 22 ++++++++++++++++++++++ 3 files changed, 28 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index deb5ae21a559..d266f2792dae 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -4,12 +4,16 @@ config ATH10K select ATH_COMMON select CRC32 select WANT_DEV_COREDUMP + select ATH10K_CE ---help--- This module adds support for wireless adapters based on Atheros IEEE 802.11ac family of chipsets. If you choose to build a module, it'll be called ath10k. 
+config ATH10K_CE + bool + config ATH10K_PCI tristate "Atheros ath10k PCI support" depends on ATH10K && PCI diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index 6739ac26fd29..536f3dfb9364 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -22,10 +22,10 @@ ath10k_core-$(CONFIG_THERMAL) += thermal.o ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o ath10k_core-$(CONFIG_PM) += wow.o ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o +ath10k_core-$(CONFIG_ATH10K_CE) += ce.o obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o -ath10k_pci-y += pci.o \ - ce.o +ath10k_pci-y += pci.o ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index b9def7bace2f..e7e7b342e5b8 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -464,6 +464,7 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context, buffer, nbytes, transfer_id, flags); } +EXPORT_SYMBOL(ath10k_ce_send_nolock); void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) { @@ -491,6 +492,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) src_ring->per_transfer_context[src_ring->write_index] = NULL; } +EXPORT_SYMBOL(__ath10k_ce_send_revert); int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, void *per_transfer_context, @@ -510,6 +512,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, return ret; } +EXPORT_SYMBOL(ath10k_ce_send); int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) { @@ -525,6 +528,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) return delta; } +EXPORT_SYMBOL(ath10k_ce_num_free_src_entries); int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) { @@ -539,6 +543,7 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); } +EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs); static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, dma_addr_t paddr) @@ -622,6 +627,7 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); dest_ring->write_index = write_index; } +EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx); int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, dma_addr_t paddr) @@ -636,6 +642,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, return ret; } +EXPORT_SYMBOL(ath10k_ce_rx_post_buf); /* * Guts of ath10k_ce_completed_recv_next. @@ -748,6 +755,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, per_transfer_ctx, nbytesp); } +EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock); int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, @@ -766,6 +774,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, return ret; } +EXPORT_SYMBOL(ath10k_ce_completed_recv_next); static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, @@ -882,6 +891,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, per_transfer_contextp, bufferp); } +EXPORT_SYMBOL(ath10k_ce_revoke_recv_next); /* * Guts of ath10k_ce_completed_send_next. 
@@ -936,6 +946,7 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, return 0; } +EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock); static void ath10k_ce_extract_desc_data(struct ath10k *ar, struct ath10k_ce_ring *src_ring, @@ -1025,6 +1036,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, return ret; } +EXPORT_SYMBOL(ath10k_ce_cancel_send_next); int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp) @@ -1040,6 +1052,7 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, return ret; } +EXPORT_SYMBOL(ath10k_ce_completed_send_next); /* * Guts of interrupt handler for per-engine interrupts on a particular CE. @@ -1078,6 +1091,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) spin_unlock_bh(&ce->ce_lock); } +EXPORT_SYMBOL(ath10k_ce_per_engine_service); /* * Handler for per-engine interrupts on ALL active CEs. @@ -1102,6 +1116,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) ath10k_ce_per_engine_service(ar, ce_id); } } +EXPORT_SYMBOL(ath10k_ce_per_engine_service_any); /* * Adjust interrupts for the copy complete handler. @@ -1139,6 +1154,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) return 0; } +EXPORT_SYMBOL(ath10k_ce_disable_interrupts); void ath10k_ce_enable_interrupts(struct ath10k *ar) { @@ -1154,6 +1170,7 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar) ath10k_ce_per_engine_handler_adjust(ce_state); } } +EXPORT_SYMBOL(ath10k_ce_enable_interrupts); static int ath10k_ce_init_src_ring(struct ath10k *ar, unsigned int ce_id, @@ -1454,6 +1471,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, return 0; } +EXPORT_SYMBOL(ath10k_ce_init_pipe); static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) { @@ -1479,6 +1497,7 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) ath10k_ce_deinit_src_ring(ar, ce_id); ath10k_ce_deinit_dest_ring(ar, ce_id); } +EXPORT_SYMBOL(ath10k_ce_deinit_pipe); static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) { @@ -1545,6 +1564,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) ce_state->ops->ce_free_pipe(ar, ce_id); } +EXPORT_SYMBOL(ath10k_ce_free_pipe); void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data) @@ -1584,6 +1604,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar, spin_unlock_bh(&ce->ce_lock); } +EXPORT_SYMBOL(ath10k_ce_dump_registers); static const struct ath10k_ce_ops ce_ops = { .ce_alloc_src_ring = ath10k_ce_alloc_src_ring, @@ -1680,3 +1701,4 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, return 0; } +EXPORT_SYMBOL(ath10k_ce_alloc_pipe); -- cgit v1.2.3 From 17f5559e0da51a8780cd93206689f05feca46615 Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:14 +0300 Subject: ath10k: platform driver for WCN3990 SNOC WLAN module WCN3990 is integrated 802.11ac chipset with SNOC bus interface. Add snoc layer driver registration and associated ops. WCN3990 support is not yet complete as cold-boot handshake is done using qmi(Qualcomm-MSM-Interface) and qmi client support will be added once qmi framework is available. 
Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/Kconfig | 8 ++ drivers/net/wireless/ath/ath10k/Makefile | 3 + drivers/net/wireless/ath/ath10k/snoc.c | 153 +++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/snoc.h | 76 +++++++++++++++ 4 files changed, 240 insertions(+) create mode 100644 drivers/net/wireless/ath/ath10k/snoc.c create mode 100644 drivers/net/wireless/ath/ath10k/snoc.h (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index d266f2792dae..84f071ac0d84 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -40,6 +40,14 @@ config ATH10K_USB This module adds experimental support for USB bus. Currently work in progress and will not fully work. +config ATH10K_SNOC + tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)" + depends on ATH10K && ARCH_QCOM + ---help--- + This module adds support for integrated WCN3990 chip connected + to system NOC(SNOC). Currently work in progress and will not + fully work. + config ATH10K_DEBUG bool "Atheros ath10k debugging" depends on ATH10K diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index 536f3dfb9364..44d60a61b242 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -35,5 +35,8 @@ ath10k_sdio-y += sdio.o obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o ath10k_usb-y += usb.o +obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o +ath10k_snoc-y += snoc.o + # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c new file mode 100644 index 000000000000..30354a653b62 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include "debug.h" +#include "hif.h" +#include "htc.h" +#include "ce.h" +#include "snoc.h" +#include +#include +#include + +static const struct ath10k_snoc_drv_priv drv_priv = { + .hw_rev = ATH10K_HW_WCN3990, + .dma_mask = DMA_BIT_MASK(37), +}; + +void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + iowrite32(value, ar_snoc->mem + offset); +} + +u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + u32 val; + + val = ioread32(ar_snoc->mem + offset); + + return val; +} + +static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, +}; + +static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, +}; + +static const struct of_device_id ath10k_snoc_dt_match[] = { + { .compatible = "qcom,wcn3990-wifi", + .data = &drv_priv, + }, + { } +}; +MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match); + +static int ath10k_snoc_probe(struct platform_device *pdev) +{ + const struct ath10k_snoc_drv_priv *drv_data; + const struct of_device_id *of_id; + struct ath10k_snoc *ar_snoc; + struct device *dev; + struct ath10k *ar; + int ret; + + of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev); + if (!of_id) { + dev_err(&pdev->dev, "failed to find matching device tree id\n"); + return -EINVAL; + } + + drv_data = of_id->data; + dev = &pdev->dev; + + ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask); + if (ret) { + dev_err(dev, "failed to set dma mask: %d", ret); + return ret; + } + + ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC, + drv_data->hw_rev, &ath10k_snoc_hif_ops); + if (!ar) { + dev_err(dev, "failed to allocate core\n"); + return -ENOMEM; + } + + ar_snoc = ath10k_snoc_priv(ar); + ar_snoc->dev = pdev; + platform_set_drvdata(pdev, ar); + ar_snoc->ar = ar; + ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops; + ar->ce_priv = &ar_snoc->ce; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); + ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!"); + + return ret; +} + +static int ath10k_snoc_remove(struct platform_device *pdev) +{ + struct ath10k *ar = platform_get_drvdata(pdev); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); + ath10k_core_destroy(ar); + + return 0; +} + +static struct platform_driver ath10k_snoc_driver = { + .probe = ath10k_snoc_probe, + .remove = ath10k_snoc_remove, + .driver = { + .name = "ath10k_snoc", + .owner = THIS_MODULE, + .of_match_table = ath10k_snoc_dt_match, + }, +}; + +static int __init ath10k_snoc_init(void) +{ + int ret; + + ret = platform_driver_register(&ath10k_snoc_driver); + if (ret) + pr_err("failed to register ath10k snoc driver: %d\n", + ret); + + return ret; +} +module_init(ath10k_snoc_init); + +static void __exit ath10k_snoc_exit(void) +{ + platform_driver_unregister(&ath10k_snoc_driver); +} +module_exit(ath10k_snoc_exit); + +MODULE_AUTHOR("Qualcomm"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices"); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h new file mode 100644 index 000000000000..cf65b01e0085 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SNOC_H_ +#define _SNOC_H_ + +#include "hw.h" +#include "ce.h" +#include "pci.h" + +struct ath10k_snoc_drv_priv { + enum ath10k_hw_rev hw_rev; + u64 dma_mask; +}; + +struct snoc_state { + u32 pipe_cfg_addr; + u32 svc_to_pipe_map; +}; + +struct ath10k_snoc_pipe { + struct ath10k_ce_pipe *ce_hdl; + u8 pipe_num; + struct ath10k *hif_ce_state; + size_t buf_sz; + /* protect ce info */ + spinlock_t pipe_lock; + struct ath10k_snoc *ar_snoc; +}; + +struct ath10k_snoc_target_info { + u32 target_version; + u32 target_type; + u32 target_revision; + u32 soc_version; +}; + +struct ath10k_snoc_ce_irq { + u32 irq_line; +}; + +struct ath10k_snoc { + struct platform_device *dev; + struct ath10k *ar; + void __iomem *mem; + dma_addr_t mem_pa; + struct ath10k_snoc_target_info target_info; + size_t mem_len; + struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX]; + struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX]; + struct ath10k_ce ce; + struct timer_list rx_post_retry; +}; + +static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) +{ + return (struct ath10k_snoc *)ar->drv_priv; +} + +void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value); +u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset); + +#endif /* _SNOC_H_ */ -- cgit v1.2.3 From c963a683e70151dc458e9a85ed4b366b09f65e57 Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:15 +0300 Subject: ath10k: add resource init and deinit for WCN3990 Add methods for resource(memory, irq, HOST CE config) initialization and de-initialization for WCN3990 target. WCN3990 target uses 12 copy engine and following CE config. 
[CE0] :host->target control and raw streams [CE1] :target->host HTT [CE2] :target->host WMI [CE3] :host->target WMI [CE4] :host->target HTT [CE5] :reserved [CE6] :Target autonomous HIF_memcpy [CE7] :reserved [CE8] :reserved [CE9] :target->host HTT [CE10] :target->host HTT [CE11] :target -> host PKTLOG Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 271 +++++++++++++++++++++++++++++++++ 1 file changed, 271 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 30354a653b62..575355ce675f 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -24,12 +24,135 @@ #include #include #include +#define WCN3990_CE_ATTR_FLAGS 0 + +static char *const ce_name[] = { + "WLAN_CE_0", + "WLAN_CE_1", + "WLAN_CE_2", + "WLAN_CE_3", + "WLAN_CE_4", + "WLAN_CE_5", + "WLAN_CE_6", + "WLAN_CE_7", + "WLAN_CE_8", + "WLAN_CE_9", + "WLAN_CE_10", + "WLAN_CE_11", +}; static const struct ath10k_snoc_drv_priv drv_priv = { .hw_rev = ATH10K_HW_WCN3990, .dma_mask = DMA_BIT_MASK(37), }; +static struct ce_attr host_ce_config_wlan[] = { + /* CE0: host->target HTC control streams */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = NULL, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = NULL, + }, + + /* CE2: target->host WMI */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 64, + .recv_cb = NULL, + }, + + /* CE3: host->target WMI */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = NULL, + }, + + /* CE4: host->target HTT */ + { + .flags = WCN3990_CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 256, + .src_sz_max = 256, + .dest_nentries = 0, + .send_cb = NULL, + }, + + /* CE5: target->host HTT (ipa_uc->target ) */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 512, + .dest_nentries = 512, + .recv_cb = NULL, + }, + + /* CE6: target autonomous hif_memcpy */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE7: ce_diag, the Diagnostic Window */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 2, + .src_sz_max = 2048, + .dest_nentries = 2, + }, + + /* CE8: Target to uMC */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 128, + }, + + /* CE9 target->host HTT */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = NULL, + }, + + /* CE10: target->host HTT */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = NULL, + }, + + /* CE11: target -> host PKTLOG */ + { + .flags = WCN3990_CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = NULL, + }, +}; + void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); @@ -57,6 +180,119 @@ static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { .write32 = ath10k_snoc_write32, }; +static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg) +{ + return IRQ_HANDLED; +} + +static int ath10k_snoc_request_irq(struct ath10k *ar) +{ + 
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int irqflags = IRQF_TRIGGER_RISING; + int ret, id; + + for (id = 0; id < CE_COUNT_MAX; id++) { + ret = request_irq(ar_snoc->ce_irqs[id].irq_line, + ath10k_snoc_per_engine_handler, + irqflags, ce_name[id], ar); + if (ret) { + ath10k_err(ar, + "failed to register IRQ handler for CE %d: %d", + id, ret); + goto err_irq; + } + } + + return 0; + +err_irq: + for (id -= 1; id >= 0; id--) + free_irq(ar_snoc->ce_irqs[id].irq_line, ar); + + return ret; +} + +static void ath10k_snoc_free_irq(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int id; + + for (id = 0; id < CE_COUNT_MAX; id++) + free_irq(ar_snoc->ce_irqs[id].irq_line, ar); +} + +static int ath10k_snoc_resource_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct platform_device *pdev; + struct resource *res; + int i, ret = 0; + + pdev = ar_snoc->dev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase"); + if (!res) { + ath10k_err(ar, "Memory base not found in DT\n"); + return -EINVAL; + } + + ar_snoc->mem_pa = res->start; + ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa, + resource_size(res)); + if (!ar_snoc->mem) { + ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n", + &ar_snoc->mem_pa); + return -EINVAL; + } + + for (i = 0; i < CE_COUNT; i++) { + res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i); + if (!res) { + ath10k_err(ar, "failed to get IRQ%d\n", i); + ret = -ENODEV; + goto out; + } + ar_snoc->ce_irqs[i].irq_line = res->start; + } + +out: + return ret; +} + +static int ath10k_snoc_setup_resource(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_snoc_pipe *pipe; + int i, ret; + + spin_lock_init(&ce->ce_lock); + for (i = 0; i < CE_COUNT; i++) { + pipe = &ar_snoc->pipe_info[i]; + pipe->ce_hdl = &ce->ce_states[i]; + pipe->pipe_num = i; + pipe->hif_ce_state = ar; + + ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); + if (ret) { + ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", + i, ret); + return ret; + } + + pipe->buf_sz = host_ce_config_wlan[i].src_sz_max; + } + + return 0; +} + +static void ath10k_snoc_release_resource(struct ath10k *ar) +{ + int i; + + for (i = 0; i < CE_COUNT; i++) + ath10k_ce_free_pipe(ar, i); +} + static const struct of_device_id ath10k_snoc_dt_match[] = { { .compatible = "qcom,wcn3990-wifi", .data = &drv_priv, @@ -103,9 +339,41 @@ static int ath10k_snoc_probe(struct platform_device *pdev) ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops; ar->ce_priv = &ar_snoc->ce; + ath10k_snoc_resource_init(ar); + if (ret) { + ath10k_warn(ar, "failed to initialize resource: %d\n", ret); + goto err_core_destroy; + } + + ath10k_snoc_setup_resource(ar); + if (ret) { + ath10k_warn(ar, "failed to setup resource: %d\n", ret); + goto err_core_destroy; + } + ret = ath10k_snoc_request_irq(ar); + if (ret) { + ath10k_warn(ar, "failed to request irqs: %d\n", ret); + goto err_release_resource; + } + ret = ath10k_core_register(ar, drv_data->hw_rev); + if (ret) { + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_free_irq; + } ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!"); + return 0; + +err_free_irq: + ath10k_snoc_free_irq(ar); + +err_release_resource: + ath10k_snoc_release_resource(ar); + +err_core_destroy: + ath10k_core_destroy(ar); + 
return ret; } @@ -114,6 +382,9 @@ static int ath10k_snoc_remove(struct platform_device *pdev) struct ath10k *ar = platform_get_drvdata(pdev); ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); + ath10k_core_unregister(ar); + ath10k_snoc_free_irq(ar); + ath10k_snoc_release_resource(ar); ath10k_core_destroy(ar); return 0; -- cgit v1.2.3 From a6e149a9ff03e00b0a4293c0cc70b37db48fdf80 Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:17 +0300 Subject: ath10k: add hif start/stop methods for wcn3990 snoc layer Add hif start/stop callback for allocating/freeing buffers on tx/rx pipe and enabling/disabling the tx/rx pipe interrupts. Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 189 ++++++++++++++++++++++++++++++++- 1 file changed, 187 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 575355ce675f..dcd8bb7b71b4 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -25,6 +25,7 @@ #include #include #define WCN3990_CE_ATTR_FLAGS 0 +#define ATH10K_SNOC_RX_POST_RETRY_MS 50 static char *const ce_name[] = { "WLAN_CE_0", @@ -170,9 +171,193 @@ u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset) return val; } +static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe) +{ + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct sk_buff *skb; + dma_addr_t paddr; + int ret; + + skb = dev_alloc_skb(pipe->buf_sz); + if (!skb) + return -ENOMEM; + + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); + + paddr = dma_map_single(ar->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ar->dev, paddr))) { + ath10k_warn(ar, "failed to dma map snoc rx buf\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + + ATH10K_SKB_RXCB(skb)->paddr = paddr; + + spin_lock_bh(&ce->ce_lock); + ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr); + spin_unlock_bh(&ce->ce_lock); + if (ret) { + dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + int ret, num; + + if (pipe->buf_sz == 0) + return; + + if (!ce_pipe->dest_ring) + return; + + spin_lock_bh(&ce->ce_lock); + num = __ath10k_ce_rx_num_free_bufs(ce_pipe); + spin_unlock_bh(&ce->ce_lock); + while (num--) { + ret = __ath10k_snoc_rx_post_buf(pipe); + if (ret) { + if (ret == -ENOSPC) + break; + ath10k_warn(ar, "failed to post rx buf: %d\n", ret); + mod_timer(&ar_snoc->rx_post_retry, jiffies + + ATH10K_SNOC_RX_POST_RETRY_MS); + break; + } + } +} + +static void ath10k_snoc_rx_post(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int i; + + for (i = 0; i < CE_COUNT; i++) + ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]); +} + +static inline void ath10k_snoc_irq_disable(struct ath10k *ar) +{ + ath10k_ce_disable_interrupts(ar); +} + +static inline void ath10k_snoc_irq_enable(struct ath10k *ar) +{ + ath10k_ce_enable_interrupts(ar); +} + +static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) +{ + struct 
ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + struct ath10k *ar; + int i; + + ar = snoc_pipe->hif_ce_state; + ce_pipe = snoc_pipe->ce_hdl; + ce_ring = ce_pipe->dest_ring; + + if (!ce_ring) + return; + + if (!snoc_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; + + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } +} + +static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) +{ + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct ath10k_snoc *ar_snoc; + struct sk_buff *skb; + struct ath10k *ar; + int i; + + ar = snoc_pipe->hif_ce_state; + ar_snoc = ath10k_snoc_priv(ar); + ce_pipe = snoc_pipe->ce_hdl; + ce_ring = ce_pipe->src_ring; + + if (!ce_ring) + return; + + if (!snoc_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; + + ath10k_htc_tx_completion_handler(ar, skb); + } +} + +static void ath10k_snoc_buffer_cleanup(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_pipe *pipe_info; + int pipe_num; + + del_timer_sync(&ar_snoc->rx_post_retry); + for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { + pipe_info = &ar_snoc->pipe_info[pipe_num]; + ath10k_snoc_rx_pipe_cleanup(pipe_info); + ath10k_snoc_tx_pipe_cleanup(pipe_info); + } +} + +static void ath10k_snoc_hif_stop(struct ath10k *ar) +{ + ath10k_snoc_irq_disable(ar); + ath10k_snoc_buffer_cleanup(ar); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); +} + +static int ath10k_snoc_hif_start(struct ath10k *ar) +{ + ath10k_snoc_irq_enable(ar); + ath10k_snoc_rx_post(ar); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); + + return 0; +} + static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { - .read32 = ath10k_snoc_read32, - .write32 = ath10k_snoc_write32, + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, + .start = ath10k_snoc_hif_start, + .stop = ath10k_snoc_hif_stop, }; static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { -- cgit v1.2.3 From b8c27e86211832b06667f11be3c24acc78828ee5 Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:18 +0300 Subject: ath10k: add HTC services for WCN3990 WCN3990 target uses 3 Copy engine(CE1/CE9/CE10) in RX path and CE 11 for pktlog. Add data path HTC ep services and PKTLOG services for WCN3990. 
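As a reminder of the encoding (this only restates the existing SVC(group, idx) convention in htc.h, it is not new driver code), an HTC service id packs the service group into the upper byte and the index within that group into the lower byte, so the new ids decompose as:

    /* Sketch of SVC(group, idx) = (group << 8) | idx. */
    enum {
    	EXAMPLE_HTT_DATA2_MSG = (3 << 8) | 1,	/* ATH10K_HTC_SVC_GRP_HTT,   0x0301 */
    	EXAMPLE_HTT_DATA3_MSG = (3 << 8) | 2,	/* ATH10K_HTC_SVC_GRP_HTT,   0x0302 */
    	EXAMPLE_HTT_LOG_MSG   = (6 << 8) | 0,	/* ATH10K_LOG_SERVICE_GROUP, 0x0600 */
    };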
Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htc.c | 6 ++++++ drivers/net/wireless/ath/ath10k/htc.h | 4 ++++ 2 files changed, 10 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 492dc5b4bbf2..8902720b4e49 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -542,8 +542,14 @@ static const char *htc_service_name(enum ath10k_htc_svc_id id) return "NMI Data"; case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: return "HTT Data"; + case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG: + return "HTT Data"; + case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG: + return "HTT Data"; case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: return "RAW"; + case ATH10K_HTC_SVC_ID_HTT_LOG_MSG: + return "PKTLOG"; } return "Unknown"; diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index a2f8814b3e53..34877597dd6a 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -248,6 +248,7 @@ enum ath10k_htc_svc_gid { ATH10K_HTC_SVC_GRP_WMI = 1, ATH10K_HTC_SVC_GRP_NMI = 2, ATH10K_HTC_SVC_GRP_HTT = 3, + ATH10K_LOG_SERVICE_GROUP = 6, ATH10K_HTC_SVC_GRP_TEST = 254, ATH10K_HTC_SVC_GRP_LAST = 255, @@ -273,6 +274,9 @@ enum ath10k_htc_svc_id { ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0), + ATH10K_HTC_SVC_ID_HTT_DATA2_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 1), + ATH10K_HTC_SVC_ID_HTT_DATA3_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 2), + ATH10K_HTC_SVC_ID_HTT_LOG_MSG = SVC(ATH10K_LOG_SERVICE_GROUP, 0), /* raw stream service (i.e. flash, tcmd, calibration apps) */ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0), }; -- cgit v1.2.3 From 84efe7f6ebc56dbeb18c3448a487f2265c647d91 Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:20 +0300 Subject: ath10k: map HTC services to tx/rx pipes for wcn3990 Add mapping of HTC endpoint services supported by wcn3990 target to tx/rx pipe. 
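A caller-side sketch of how the table is consulted (illustrative only, going through the generic HIF wrapper rather than the snoc op directly): during HTC service connect the core asks the bus which copy engines carry a service, e.g. the HTT data service resolves to CE4 for tx and CE1 for rx on WCN3990:

    /* Illustrative only: resolving a service to its WCN3990 copy-engine pipes. */
    u8 ul_pipe, dl_pipe;
    int ret;

    ret = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
    				     &ul_pipe, &dl_pipe);
    if (ret == 0)
    	ath10k_dbg(ar, ATH10K_DBG_SNOC, "HTT data: ul pipe %u dl pipe %u\n",
    		   ul_pipe, dl_pipe);	/* expect ul 4 (CE4), dl 1 (CE1) */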
Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 168 +++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index dcd8bb7b71b4..a39eee7ed56c 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -154,6 +154,116 @@ static struct ce_attr host_ce_config_wlan[] = { }, }; +static struct service_to_pipe target_service_to_ce_map_wlan[] = { + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), + __cpu_to_le32(5), + }, + { /* in = DL = target -> host */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(9), + }, + { /* in = DL = target -> host */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(10), + }, + { /* in = DL = target -> host pktlog */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(11), + }, + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + 
__cpu_to_le32(0), + }, +}; + void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); @@ -249,6 +359,62 @@ static void ath10k_snoc_rx_post(struct ath10k *ar) ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]); } +static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar, + u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + const struct service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n"); + + for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { + entry = &target_service_to_ce_map_wlan[i]; + + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } + + if (WARN_ON(!ul_set || !dl_set)) + return -ENOENT; + + return 0; +} + +static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n"); + + (void)ath10k_snoc_hif_map_service_to_pipe(ar, + ATH10K_HTC_SVC_ID_RSVD_CTRL, + ul_pipe, dl_pipe); +} + static inline void ath10k_snoc_irq_disable(struct ath10k *ar) { ath10k_ce_disable_interrupts(ar); @@ -358,6 +524,8 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .write32 = ath10k_snoc_write32, .start = ath10k_snoc_hif_start, .stop = ath10k_snoc_hif_stop, + .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe, + .get_default_pipe = ath10k_snoc_hif_get_default_pipe, }; static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { -- cgit v1.2.3 From 0fa4fbe9b8cf7656813382602a6a5d330f01d3ae Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:22 +0300 Subject: ath10k: add hif power-up/power-down methods Add hif power-up/power-down methods for wcn3990 target. 
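These ops are reached through the generic HIF wrappers; a minimal, illustrative caller-side sketch (not part of this patch) of the boot-time sequence:

    /* Illustrative only: the core powers the bus up before booting firmware
     * and powers it down again on teardown, via the generic HIF wrappers.
     */
    ret = ath10k_hif_power_up(ar);
    if (ret) {
    	ath10k_err(ar, "failed to power up hif: %d\n", ret);
    	return ret;
    }
    /* ... download/boot firmware, set up HTC/HTT ... */
    ath10k_hif_power_down(ar);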
Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 61 ++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index a39eee7ed56c..9d402e4afaf9 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -519,6 +519,65 @@ static int ath10k_snoc_hif_start(struct ath10k *ar) return 0; } +static int ath10k_snoc_init_pipes(struct ath10k *ar) +{ + int i, ret; + + for (i = 0; i < CE_COUNT; i++) { + ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); + if (ret) { + ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", + i, ret); + return ret; + } + } + + return 0; +} + +static int ath10k_snoc_wlan_enable(struct ath10k *ar) +{ + return 0; +} + +static void ath10k_snoc_wlan_disable(struct ath10k *ar) +{ +} + +static void ath10k_snoc_hif_power_down(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); + + ath10k_snoc_wlan_disable(ar); +} + +static int ath10k_snoc_hif_power_up(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n", + __func__, ar->state); + + ret = ath10k_snoc_wlan_enable(ar); + if (ret) { + ath10k_err(ar, "failed to enable wcn3990: %d\n", ret); + return ret; + } + + ret = ath10k_snoc_init_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto err_wlan_enable; + } + + return 0; + +err_wlan_enable: + ath10k_snoc_wlan_disable(ar); + + return ret; +} + static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .read32 = ath10k_snoc_read32, .write32 = ath10k_snoc_write32, @@ -526,6 +585,8 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .stop = ath10k_snoc_hif_stop, .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe, .get_default_pipe = ath10k_snoc_hif_get_default_pipe, + .power_up = ath10k_snoc_hif_power_up, + .power_down = ath10k_snoc_hif_power_down, }; static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { -- cgit v1.2.3 From d390509bdf501c9c8c6e61248e4bc9314c86d854 Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:24 +0300 Subject: ath10k: add hif tx methods for wcn3990 Add hif tx/tx-complete methods for wcn3990 target. 
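A hedged sketch of how an upper layer hands a DMA-mapped buffer to the new tx path through the generic scatter-gather HIF op (local variable names here are illustrative only):

    /* Illustrative only: one DMA-mapped buffer submitted as a single
     * scatter-gather item through the generic HIF wrapper.
     */
    struct ath10k_hif_sg_item item = {
    	.transfer_id      = eid,			/* HTC endpoint id */
    	.transfer_context = skb,
    	.vaddr            = skb->data,
    	.paddr            = ATH10K_SKB_CB(skb)->paddr,	/* already mapped */
    	.len              = skb->len,
    };

    ret = ath10k_hif_tx_sg(ar, ul_pipe_id, &item, 1);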
Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 123 ++++++++++++++++++++++++++++++++- 1 file changed, 120 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 9d402e4afaf9..899f0a327050 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -42,6 +42,9 @@ static char *const ce_name[] = { "WLAN_CE_11", }; +static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state); + static const struct ath10k_snoc_drv_priv drv_priv = { .hw_rev = ATH10K_HW_WCN3990, .dma_mask = DMA_BIT_MASK(37), @@ -54,7 +57,7 @@ static struct ce_attr host_ce_config_wlan[] = { .src_nentries = 16, .src_sz_max = 2048, .dest_nentries = 0, - .send_cb = NULL, + .send_cb = ath10k_snoc_htc_tx_cb, }, /* CE1: target->host HTT + HTC control */ @@ -81,7 +84,7 @@ static struct ce_attr host_ce_config_wlan[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, - .send_cb = NULL, + .send_cb = ath10k_snoc_htc_tx_cb, }, /* CE4: host->target HTT */ @@ -90,7 +93,7 @@ static struct ce_attr host_ce_config_wlan[] = { .src_nentries = 256, .src_sz_max = 256, .dest_nentries = 0, - .send_cb = NULL, + .send_cb = ath10k_snoc_htt_tx_cb, }, /* CE5: target->host HTT (ipa_uc->target ) */ @@ -359,6 +362,117 @@ static void ath10k_snoc_rx_post(struct ath10k *ar) ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]); } +static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state) +{ + struct ath10k *ar = ce_state->ar; + struct sk_buff_head list; + struct sk_buff *skb; + + __skb_queue_head_init(&list); + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + if (!skb) + continue; + + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) + ath10k_htc_tx_completion_handler(ar, skb); +} + +static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state) +{ + struct ath10k *ar = ce_state->ar; + struct sk_buff *skb; + + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + if (!skb) + continue; + + dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, + skb->len, DMA_TO_DEVICE); + ath10k_htt_hif_tx_complete(ar, skb); + } +} + +static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_snoc_pipe *snoc_pipe; + struct ath10k_ce_pipe *ce_pipe; + int err, i = 0; + + snoc_pipe = &ar_snoc->pipe_info[pipe_id]; + ce_pipe = snoc_pipe->ce_hdl; + spin_lock_bh(&ce->ce_lock); + + for (i = 0; i < n_items - 1; i++) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snoc tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + CE_SEND_FLAG_GATHER); + if (err) + goto err; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snoc tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + 0); + if (err) + goto err; + + spin_unlock_bh(&ce->ce_lock); + + return 0; + +err: + for (; i > 0; i--) + __ath10k_ce_send_revert(ce_pipe); + + spin_unlock_bh(&ce->ce_lock); + return err; +} + 
+static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n"); + + return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl); +} + +static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int force) +{ + int resources; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n"); + + if (!force) { + resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe); + + if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) + return; + } + ath10k_ce_per_engine_service(ar, pipe); +} + static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) @@ -587,6 +701,9 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .get_default_pipe = ath10k_snoc_hif_get_default_pipe, .power_up = ath10k_snoc_hif_power_up, .power_down = ath10k_snoc_hif_power_down, + .tx_sg = ath10k_snoc_hif_tx_sg, + .send_complete_check = ath10k_snoc_hif_send_complete_check, + .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number, }; static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { -- cgit v1.2.3 From d915105231ca0581a9f87e59ed00bc17a54e254f Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:25 +0300 Subject: ath10k: add hif rx methods for wcn3990 Add hif rx methods in rx path for wcn3990 target. Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 171 +++++++++++++++++++++++++++++---- 1 file changed, 153 insertions(+), 18 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 899f0a327050..6d9cccee9bbe 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -26,6 +26,7 @@ #include #define WCN3990_CE_ATTR_FLAGS 0 #define ATH10K_SNOC_RX_POST_RETRY_MS 50 +#define CE_POLL_PIPE 4 static char *const ce_name[] = { "WLAN_CE_0", @@ -44,6 +45,9 @@ static char *const ce_name[] = { static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static const struct ath10k_snoc_drv_priv drv_priv = { .hw_rev = ATH10K_HW_WCN3990, @@ -53,7 +57,7 @@ static const struct ath10k_snoc_drv_priv drv_priv = { static struct ce_attr host_ce_config_wlan[] = { /* CE0: host->target HTC control streams */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 16, .src_sz_max = 2048, .dest_nentries = 0, @@ -62,25 +66,25 @@ static struct ce_attr host_ce_config_wlan[] = { /* CE1: target->host HTT + HTC control */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, - .recv_cb = NULL, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, }, /* CE2: target->host WMI */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 64, - .recv_cb = NULL, + .recv_cb = ath10k_snoc_htc_rx_cb, }, /* CE3: host->target WMI */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, @@ -89,7 +93,7 @@ static struct ce_attr host_ce_config_wlan[] = { 
/* CE4: host->target HTT */ { - .flags = WCN3990_CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 256, .src_sz_max = 256, .dest_nentries = 0, @@ -98,16 +102,16 @@ static struct ce_attr host_ce_config_wlan[] = { /* CE5: target->host HTT (ipa_uc->target ) */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 512, .dest_nentries = 512, - .recv_cb = NULL, + .recv_cb = ath10k_snoc_htt_rx_cb, }, /* CE6: target autonomous hif_memcpy */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, @@ -115,7 +119,7 @@ static struct ce_attr host_ce_config_wlan[] = { /* CE7: ce_diag, the Diagnostic Window */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 2, .src_sz_max = 2048, .dest_nentries = 2, @@ -123,7 +127,7 @@ static struct ce_attr host_ce_config_wlan[] = { /* CE8: Target to uMC */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 128, @@ -131,29 +135,29 @@ static struct ce_attr host_ce_config_wlan[] = { /* CE9 target->host HTT */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, - .recv_cb = NULL, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, }, /* CE10: target->host HTT */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, - .recv_cb = NULL, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, }, /* CE11: target -> host PKTLOG */ { - .flags = WCN3990_CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, - .recv_cb = NULL, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, }, }; @@ -362,6 +366,82 @@ static void ath10k_snoc_rx_post(struct ath10k *ar) ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]); } +static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state, + void (*callback)(struct ath10k *ar, + struct sk_buff *skb)) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id]; + struct sk_buff *skb; + struct sk_buff_head list; + void *transfer_context; + unsigned int nbytes, max_nbytes; + + __skb_queue_head_init(&list); + while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, + &nbytes) == 0) { + skb = transfer_context; + max_nbytes = skb->len + skb_tailroom(skb); + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + max_nbytes, DMA_FROM_DEVICE); + + if (unlikely(max_nbytes < nbytes)) { + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); + dev_kfree_skb_any(skb); + continue; + } + + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n", + ce_state->id, skb->len); + + callback(ar, skb); + } + + ath10k_snoc_rx_post_pipe(pipe_info); +} + +static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + /* CE4 polling needs to be done whenever CE pipe which transports + * HTT Rx (target->host) is processed. 
+ */ + ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE); + + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) +{ + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); + ath10k_htt_t2h_msg_handler(ar, skb); +} + +static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE); + ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver); +} + +static void ath10k_snoc_rx_replenish_retry(struct timer_list *t) +{ + struct ath10k_pci *ar_snoc = from_timer(ar_snoc, t, rx_post_retry); + struct ath10k *ar = ar_snoc->ar; + + ath10k_snoc_rx_post(ar); +} + static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; @@ -620,6 +700,8 @@ static void ath10k_snoc_hif_stop(struct ath10k *ar) { ath10k_snoc_irq_disable(ar); ath10k_snoc_buffer_cleanup(ar); + napi_synchronize(&ar->napi); + napi_disable(&ar->napi); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); } @@ -684,6 +766,7 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar) goto err_wlan_enable; } + napi_enable(&ar->napi); return 0; err_wlan_enable: @@ -711,11 +794,60 @@ static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { .write32 = ath10k_snoc_write32, }; +int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int i; + + for (i = 0; i < CE_COUNT_MAX; i++) { + if (ar_snoc->ce_irqs[i].irq_line == irq) + return i; + } + ath10k_err(ar, "No matching CE id for irq %d\n", irq); + + return -EINVAL; +} + static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg) { + struct ath10k *ar = arg; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq); + + if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) { + ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, + ce_id); + return IRQ_HANDLED; + } + + ath10k_snoc_irq_disable(ar); + napi_schedule(&ar->napi); + return IRQ_HANDLED; } +static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget) +{ + struct ath10k *ar = container_of(ctx, struct ath10k, napi); + int done = 0; + + ath10k_ce_per_engine_service_any(ar); + done = ath10k_htt_txrx_compl_task(ar, budget); + + if (done < budget) { + napi_complete(ctx); + ath10k_snoc_irq_enable(ar); + } + + return done; +} + +void ath10k_snoc_init_napi(struct ath10k *ar) +{ + netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll, + ATH10K_NAPI_BUDGET); +} + static int ath10k_snoc_request_irq(struct ath10k *ar) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); @@ -796,6 +928,7 @@ static int ath10k_snoc_setup_resource(struct ath10k *ar) struct ath10k_snoc_pipe *pipe; int i, ret; + timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0); spin_lock_init(&ce->ce_lock); for (i = 0; i < CE_COUNT; i++) { pipe = &ar_snoc->pipe_info[i]; @@ -812,6 +945,7 @@ static int ath10k_snoc_setup_resource(struct ath10k *ar) pipe->buf_sz = host_ce_config_wlan[i].src_sz_max; } + ath10k_snoc_init_napi(ar); return 0; } @@ -820,6 +954,7 @@ static void ath10k_snoc_release_resource(struct ath10k *ar) { int i; + netif_napi_del(&ar->napi); for (i = 0; i < CE_COUNT; i++) ath10k_ce_free_pipe(ar, i); } -- cgit v1.2.3 From 546d407c905bc893886edae52f2323db8eded9b9 Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:27 +0300 Subject: ath10k: modify hif tx paddr to dma_addr_t type 
Change type of hif sg tx paddr to dma_addr_t for supporting target having addressing mode greater than 32 bit. Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/hif.h | 2 +- drivers/net/wireless/ath/ath10k/pci.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 6da4e3369c5a..7abb13c1f10b 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -26,7 +26,7 @@ struct ath10k_hif_sg_item { u16 transfer_id; void *transfer_context; /* NULL = tx completion callback not called */ void *vaddr; /* for debugging mostly */ - u32 paddr; + dma_addr_t paddr; u16 len; }; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index fd1566cd7d2b..af2cf55c4c1e 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1383,8 +1383,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, for (i = 0; i < n_items - 1; i++) { ath10k_dbg(ar, ATH10K_DBG_PCI, - "pci tx item %d paddr 0x%08x len %d n_items %d\n", - i, items[i].paddr, items[i].len, n_items); + "pci tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); @@ -1401,8 +1401,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, /* `i` is equal to `n_items -1` after for() */ ath10k_dbg(ar, ATH10K_DBG_PCI, - "pci tx item %d paddr 0x%08x len %d n_items %d\n", - i, items[i].paddr, items[i].len, n_items); + "pci tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); -- cgit v1.2.3 From 140d1214ef555bcb14c7720e91d8a9594e4ab506 Mon Sep 17 00:00:00 2001 From: Rakesh Pillai Date: Tue, 10 Apr 2018 18:01:29 +0300 Subject: ath10k: add support to get target info from hif ops wcn3990 does not use bmi. Add support to get target info from hif ops. 
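
For illustration only, a minimal user-space sketch of the optional-op pattern this change relies on: the hook lives in the bus ops table, buses that cannot provide it leave the pointer NULL, and a small wrapper turns the missing op into -EOPNOTSUPP so core code needs no per-call NULL checks. All demo_* names and the placeholder version value are invented for the example and are not part of the driver.

#include <errno.h>
#include <stdio.h>

struct demo_target_info {
	unsigned int version;
	unsigned int type;
};

struct demo_hif_ops {
	/* optional: non-BMI buses fill this in, others leave it NULL */
	int (*get_target_info)(struct demo_target_info *info);
};

static int demo_snoc_get_target_info(struct demo_target_info *info)
{
	info->version = 0x400;	/* placeholder hw id for the example */
	info->type = 0x400;
	return 0;
}

/* wrapper used by core code: hides the NULL check from callers */
static int demo_hif_get_target_info(const struct demo_hif_ops *ops,
				    struct demo_target_info *info)
{
	if (!ops->get_target_info)
		return -EOPNOTSUPP;
	return ops->get_target_info(info);
}

int main(void)
{
	struct demo_hif_ops snoc = { .get_target_info = demo_snoc_get_target_info };
	struct demo_hif_ops pci = { 0 };	/* bus that keeps using BMI */
	struct demo_target_info info = { 0 };
	int ret;

	ret = demo_hif_get_target_info(&snoc, &info);
	printf("snoc: ret=%d version=0x%x\n", ret, info.version);

	ret = demo_hif_get_target_info(&pci, &info);
	printf("pci:  ret=%d (op not implemented -> -EOPNOTSUPP)\n", ret);
	return 0;
}
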
Signed-off-by: Rakesh Pillai Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 8 ++++++++ drivers/net/wireless/ath/ath10k/hif.h | 13 +++++++++++++ drivers/net/wireless/ath/ath10k/snoc.c | 10 ++++++++++ 3 files changed, 31 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 8a3020dbd4cf..6a9ad4ab8e4c 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -2472,6 +2472,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ar->hw->wiphy->hw_version = target_info.version; break; case ATH10K_BUS_SNOC: + memset(&target_info, 0, sizeof(target_info)); + ret = ath10k_hif_get_target_info(ar, &target_info); + if (ret) { + ath10k_err(ar, "could not get target info (%d)\n", ret); + goto err_power_down; + } + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; break; default: ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus); diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 7abb13c1f10b..1a59ea0068c2 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -20,6 +20,7 @@ #include #include "core.h" +#include "bmi.h" #include "debug.h" struct ath10k_hif_sg_item { @@ -93,6 +94,9 @@ struct ath10k_hif_ops { /* fetch calibration data from target eeprom */ int (*fetch_cal_eeprom)(struct ath10k *ar, void **data, size_t *data_len); + + int (*get_target_info)(struct ath10k *ar, + struct bmi_target_info *target_info); }; static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, @@ -218,4 +222,13 @@ static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar, return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len); } +static inline int ath10k_hif_get_target_info(struct ath10k *ar, + struct bmi_target_info *tgt_info) +{ + if (!ar->hif.ops->get_target_info) + return -EOPNOTSUPP; + + return ar->hif.ops->get_target_info(ar, tgt_info); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 6d9cccee9bbe..1ef0d3b51b8f 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -528,6 +528,15 @@ err: return err; } +static int ath10k_snoc_hif_get_target_info(struct ath10k *ar, + struct bmi_target_info *target_info) +{ + target_info->version = ATH10K_HW_WCN3990; + target_info->type = ATH10K_HW_WCN3990; + + return 0; +} + static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); @@ -787,6 +796,7 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .tx_sg = ath10k_snoc_hif_tx_sg, .send_complete_check = ath10k_snoc_hif_send_complete_check, .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number, + .get_target_info = ath10k_snoc_hif_get_target_info, }; static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { -- cgit v1.2.3 From ea66b12e63ac62de19685c6900903d22e2680be6 Mon Sep 17 00:00:00 2001 From: Rakesh Pillai Date: Tue, 10 Apr 2018 18:01:32 +0300 Subject: ath10k: check all CE for data if irq summary is not retained WCN3990 has interrupts per CE and the interrupt summary is not retained after the interrupt handler has finished execution. We need to check if we received any ce in rx and tx completion path. 
Generate a interrupt summary with all CE interrupts if the target does not retain interrupt summary after the execution of interrupt handler. Signed-off-by: Rakesh Pillai Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.h | 10 +++++++--- drivers/net/wireless/ath/ath10k/core.c | 13 +++++++++++++ drivers/net/wireless/ath/ath10k/hw.h | 3 +++ 3 files changed, 23 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 2c3c8f5e90ea..d8f9da334529 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -355,14 +355,18 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 +#define CE_INTERRUPT_SUMMARY (GENMASK(CE_COUNT_MAX - 1, 0)) static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar) { struct ath10k_ce *ce = ath10k_ce_priv(ar); - return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( - ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + - CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); + if (!ar->hw_params.per_ce_irq) + return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( + ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); + else + return CE_INTERRUPT_SUMMARY; } #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 6a9ad4ab8e4c..a21530dacc35 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -119,6 +119,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA9887_HW_1_0_VERSION, @@ -148,6 +149,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -176,6 +178,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -204,6 +207,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA6174_HW_3_0_VERSION, @@ -232,6 +236,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA6174_HW_3_2_VERSION, @@ -263,6 +268,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, @@ -297,6 +303,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA9984_HW_1_0_DEV_VERSION, @@ -336,6 +343,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = 
HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA9888_HW_2_0_DEV_VERSION, @@ -374,6 +382,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA9377_HW_1_0_DEV_VERSION, @@ -402,6 +411,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ -432,6 +442,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -467,6 +478,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, }, { .id = WCN3990_HW_1_0_DEV_VERSION, @@ -487,6 +499,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES, .target_64bit = true, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, + .per_ce_irq = true, }, }; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 413b1b4321f7..3041eba61e54 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -568,6 +568,9 @@ struct ath10k_hw_params { /* Target rx ring fill level */ u32 rx_ring_fill_level; + + /* target supporting per ce IRQ */ + bool per_ce_irq; }; struct htt_rx_desc; -- cgit v1.2.3 From a6a793f98786fe146f8926b02b320f0d9b48a61c Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 10 Apr 2018 18:01:34 +0300 Subject: ath10k: vote for hardware resources for WCN3990 Add clock and regulator votes for WCN3990 WLAN chipset. 
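
In outline, the power-on path added here follows the usual consumer-API ordering: configure each regulator (voltage, then load), enable it, optionally wait for it to settle, then set and enable the reference clock, and unwind in reverse order on failure. Below is a condensed kernel-style sketch of that ordering with a single supply and clock; it is not the driver's actual code, and the wcn3990_power_on() name and hard-coded values are only for illustration.

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int wcn3990_power_on(struct device *dev)
{
	struct regulator *vdd;
	struct clk *ref_clk;
	int ret;

	/* real code treats a missing optional supply as non-fatal;
	 * kept simple here */
	vdd = devm_regulator_get_optional(dev, "vdd-1.8-xo");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);	/* may be -EPROBE_DEFER */

	ref_clk = devm_clk_get(dev, "cxo_ref_clk_pin");
	if (IS_ERR(ref_clk))
		return PTR_ERR(ref_clk);

	ret = regulator_set_voltage(vdd, 1800000, 1800000);
	if (ret)
		return ret;

	ret = regulator_enable(vdd);
	if (ret)
		return ret;

	udelay(10);		/* settle delay, if the supply needs one */

	ret = clk_prepare_enable(ref_clk);
	if (ret)
		regulator_disable(vdd);	/* unwind in reverse order */

	return ret;
}
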
Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 313 ++++++++++++++++++++++++++++++++- drivers/net/wireless/ath/ath10k/snoc.h | 19 ++ 2 files changed, 331 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 1ef0d3b51b8f..2e490ff124f1 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #define WCN3990_CE_ATTR_FLAGS 0 #define ATH10K_SNOC_RX_POST_RETRY_MS 50 #define CE_POLL_PIPE 4 @@ -43,6 +45,17 @@ static char *const ce_name[] = { "WLAN_CE_11", }; +static struct ath10k_wcn3990_vreg_info vreg_cfg[] = { + {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false}, + {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false}, + {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false}, + {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false}, +}; + +static struct ath10k_wcn3990_clk_info clk_cfg[] = { + {NULL, "cxo_ref_clk_pin", 0, false}, +}; + static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state); @@ -969,6 +982,277 @@ static void ath10k_snoc_release_resource(struct ath10k *ar) ath10k_ce_free_pipe(ar, i); } +static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev, + struct ath10k_wcn3990_vreg_info *vreg_info) +{ + struct regulator *reg; + int ret = 0; + + reg = devm_regulator_get_optional(dev, vreg_info->name); + + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + + if (ret == -EPROBE_DEFER) { + ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n", + vreg_info->name); + return ret; + } + if (vreg_info->required) { + ath10k_err(ar, "Regulator %s doesn't exist: %d\n", + vreg_info->name, ret); + return ret; + } + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Optional regulator %s doesn't exist: %d\n", + vreg_info->name, ret); + goto done; + } + + vreg_info->reg = reg; + +done: + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n", + vreg_info->name, vreg_info->min_v, vreg_info->max_v, + vreg_info->load_ua, vreg_info->settle_delay); + + return 0; +} + +static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev, + struct ath10k_wcn3990_clk_info *clk_info) +{ + struct clk *handle; + int ret = 0; + + handle = devm_clk_get(dev, clk_info->name); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + if (clk_info->required) { + ath10k_err(ar, "snoc clock %s isn't available: %d\n", + clk_info->name, ret); + return ret; + } + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n", + clk_info->name, + ret); + return 0; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n", + clk_info->name, clk_info->freq); + + clk_info->handle = handle; + + return ret; +} + +static int ath10k_wcn3990_vreg_on(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_wcn3990_vreg_info *vreg_info; + int ret = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n", + vreg_info->name); + + ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v, + vreg_info->max_v); + if (ret) { + ath10k_err(ar, + "failed to set regulator %s voltage-min: %d voltage-max: %d\n", + vreg_info->name, vreg_info->min_v, 
vreg_info->max_v); + goto err_reg_config; + } + + if (vreg_info->load_ua) { + ret = regulator_set_load(vreg_info->reg, + vreg_info->load_ua); + if (ret < 0) { + ath10k_err(ar, + "failed to set regulator %s load: %d\n", + vreg_info->name, + vreg_info->load_ua); + goto err_reg_config; + } + } + + ret = regulator_enable(vreg_info->reg); + if (ret) { + ath10k_err(ar, "failed to enable regulator %s\n", + vreg_info->name); + goto err_reg_config; + } + + if (vreg_info->settle_delay) + udelay(vreg_info->settle_delay); + } + + return 0; + +err_reg_config: + for (; i >= 0; i--) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + regulator_disable(vreg_info->reg); + regulator_set_load(vreg_info->reg, 0); + regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v); + } + + return ret; +} + +static int ath10k_wcn3990_vreg_off(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_wcn3990_vreg_info *vreg_info; + int ret = 0; + int i; + + for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n", + vreg_info->name); + + ret = regulator_disable(vreg_info->reg); + if (ret) + ath10k_err(ar, "failed to disable regulator %s\n", + vreg_info->name); + + ret = regulator_set_load(vreg_info->reg, 0); + if (ret < 0) + ath10k_err(ar, "failed to set load %s\n", + vreg_info->name); + + ret = regulator_set_voltage(vreg_info->reg, 0, + vreg_info->max_v); + if (ret) + ath10k_err(ar, "failed to set voltage %s\n", + vreg_info->name); + } + + return ret; +} + +static int ath10k_wcn3990_clk_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_wcn3990_clk_info *clk_info; + int ret = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n", + clk_info->name); + + if (clk_info->freq) { + ret = clk_set_rate(clk_info->handle, clk_info->freq); + + if (ret) { + ath10k_err(ar, "failed to set clock %s freq %u\n", + clk_info->name, clk_info->freq); + goto err_clock_config; + } + } + + ret = clk_prepare_enable(clk_info->handle); + if (ret) { + ath10k_err(ar, "failed to enable clock %s\n", + clk_info->name); + goto err_clock_config; + } + } + + return 0; + +err_clock_config: + for (; i >= 0; i--) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + clk_disable_unprepare(clk_info->handle); + } + + return ret; +} + +static int ath10k_wcn3990_clk_deinit(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_wcn3990_clk_info *clk_info; + int i; + + for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n", + clk_info->name); + + clk_disable_unprepare(clk_info->handle); + } + + return 0; +} + +static int ath10k_hw_power_on(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n"); + + ret = ath10k_wcn3990_vreg_on(ar); + if (ret) + return ret; + + ret = ath10k_wcn3990_clk_init(ar); + if (ret) + goto vreg_off; + + return ret; + +vreg_off: + ath10k_wcn3990_vreg_off(ar); + return ret; +} + +static int ath10k_hw_power_off(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n"); + + ath10k_wcn3990_clk_deinit(ar); + + ret = 
ath10k_wcn3990_vreg_off(ar); + + return ret; +} + static const struct of_device_id ath10k_snoc_dt_match[] = { { .compatible = "qcom,wcn3990-wifi", .data = &drv_priv, @@ -985,6 +1269,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev) struct device *dev; struct ath10k *ar; int ret; + u32 i; of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev); if (!of_id) { @@ -1031,16 +1316,41 @@ static int ath10k_snoc_probe(struct platform_device *pdev) ath10k_warn(ar, "failed to request irqs: %d\n", ret); goto err_release_resource; } + + ar_snoc->vreg = vreg_cfg; + for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) { + ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]); + if (ret) + goto err_free_irq; + } + + ar_snoc->clk = clk_cfg; + for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) { + ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]); + if (ret) + goto err_free_irq; + } + + ret = ath10k_hw_power_on(ar); + if (ret) { + ath10k_err(ar, "failed to power on device: %d\n", ret); + goto err_free_irq; + } + ret = ath10k_core_register(ar, drv_data->hw_rev); if (ret) { ath10k_err(ar, "failed to register driver core: %d\n", ret); - goto err_free_irq; + goto err_hw_power_off; } + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!"); return 0; +err_hw_power_off: + ath10k_hw_power_off(ar); + err_free_irq: ath10k_snoc_free_irq(ar); @@ -1059,6 +1369,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev) ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); ath10k_core_unregister(ar); + ath10k_hw_power_off(ar); ath10k_snoc_free_irq(ar); ath10k_snoc_release_resource(ar); ath10k_core_destroy(ar); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index cf65b01e0085..05dc98f46ccd 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -52,6 +52,23 @@ struct ath10k_snoc_ce_irq { u32 irq_line; }; +struct ath10k_wcn3990_vreg_info { + struct regulator *reg; + const char *name; + u32 min_v; + u32 max_v; + u32 load_ua; + unsigned long settle_delay; + bool required; +}; + +struct ath10k_wcn3990_clk_info { + struct clk *handle; + const char *name; + u32 freq; + bool required; +}; + struct ath10k_snoc { struct platform_device *dev; struct ath10k *ar; @@ -63,6 +80,8 @@ struct ath10k_snoc { struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX]; struct ath10k_ce ce; struct timer_list rx_post_retry; + struct ath10k_wcn3990_vreg_info *vreg; + struct ath10k_wcn3990_clk_info *clk; }; static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) -- cgit v1.2.3 From 5072d87426bb6ed685d3ad9b694f5571859eca06 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 29 Mar 2018 17:53:04 +0100 Subject: ath6kl: fix spelling mistake: "chache" -> "cache" Trivial fix to spelling mistake in message text Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c index 0f965e9f38a4..4e94b22eaada 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@ -645,7 +645,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "CRC Err", tgt_stats->rx_crc_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", - "Key 
chache miss", tgt_stats->rx_key_cache_miss); + "Key cache miss", tgt_stats->rx_key_cache_miss); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Decrypt Err", tgt_stats->rx_decrypt_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", -- cgit v1.2.3 From 1e8f77502341035f079039c9aa9a647ebc9d881a Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Thu, 12 Apr 2018 17:46:50 +0530 Subject: ath10k: correct target assert problem due to CE5 stuck Correct a minor bug in the commit 0628467f97b5 ("ath10k: fix copy engine 5 destination ring stuck") which introduced a change to fix firmware assert that happens when ring indices of copy engine 5 are stuck for a specific duration, problem with this fix is that it did not use ring arithmatic. As a result,firmware asserts did not go away entirely athough the frequency of occurrence has reduced. Using ring arithmatic to fix the issue. Tested on QCA9984(fw version-10.4-3.4-00082). Fixes: 0628467f97b5 ("ath10k: fix copy engine 5 destination ring stuck) Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index e7e7b342e5b8..3a15923bdd26 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -620,7 +620,7 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) /* Prevent CE ring stuck issue that will occur when ring is full. * Make sure that write index is 1 less than read index. */ - if ((cur_write_idx + nentries) == dest_ring->sw_index) + if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index) nentries -= 1; write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries); -- cgit v1.2.3 From b022ca257ca6eabf38fba0c700956bdd0babf3e1 Mon Sep 17 00:00:00 2001 From: Maharaja Kennadyrajan Date: Fri, 13 Apr 2018 13:18:12 +0530 Subject: ath10k: fix a typo in ath10k_wmi_set_wmm_param() Fix a typo in the function ath10k_wmi_set_wmm_param(). Signed-off-by: Maharaja Kennadyrajan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index eb1c8a1a1c88..e12dd28fcf5d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -7648,7 +7648,7 @@ ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param) cmd->param = __cpu_to_le32(param); ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi pdev get tcp config param:%d\n", param); + "wmi pdev get tpc config param %d\n", param); return skb; } -- cgit v1.2.3 From c8489668065a283d3027e86e877b103a87f99d22 Mon Sep 17 00:00:00 2001 From: Thomas Hebb Date: Fri, 13 Apr 2018 17:40:26 +0300 Subject: ath10k: search all IEs for variant before falling back commit f2593cb1b291 ("ath10k: Search SMBIOS for OEM board file extension") added a feature to ath10k that allows Board Data File (BDF) conflicts between multiple devices that use the same device IDs but have different calibration requirements to be resolved by allowing a "variant" string to be stored in SMBIOS [and later device tree, added by commit d06f26c5c8a4 ("ath10k: search DT for qcom,ath10k-calibration- variant")] that gets appended to the ID stored in board-2.bin. This original patch had a regression, however. 
Namely that devices with a variant present in SMBIOS that didn't need custom BDFs could no longer find the default BDF, which has no variant appended. The patch was reverted and re-applied with a fix for this issue in commit 1657b8f84ed9 ("search SMBIOS for OEM board file extension"). But the fix to fall back to a default BDF introduced another issue: the driver currently parses IEs in board-2.bin one by one, and for each one it first checks to see if it matches the ID with the variant appended. If it doesn't, it checks to see if it matches the "fallback" ID with no variant. If a matching BDF is found at any point during this search, the search is terminated and that BDF is used. The issue is that it's very possible (and is currently the case for board-2.bin files present in the ath10k-firmware repository) for the default BDF to occur in an earlier IE than the variant-specific BDF. In this case, the current code will happily choose the default BDF even though a better-matching BDF is present later in the file. This patch fixes the issue by first searching the entire file for the ID with variant, and searching for the fallback ID only if that search fails. It also includes some code cleanup in the area, as ath10k_core_fetch_board_data_api_n() no longer does its own string mangling to remove the variant from an ID, instead leaving that job to a new flag passed to ath10k_core_create_board_name(). I've tested this patch on a QCA4019 and verified that the driver behaves correctly for 1) both fallback and variant BDFs present, 2) only fallback BDF present, and 3) no matching BDFs present. Fixes: 1657b8f84ed9 ("ath10k: search SMBIOS for OEM board file extension") Signed-off-by: Thomas Hebb Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 134 ++++++++++++++++++--------------- 1 file changed, 72 insertions(+), 62 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index a21530dacc35..64674d8ce457 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1266,14 +1266,61 @@ out: return ret; } +static int ath10k_core_search_bd(struct ath10k *ar, + const char *boardname, + const u8 *data, + size_t len) +{ + size_t ie_len; + struct ath10k_fw_ie *hdr; + int ret = -ENOENT, ie_id; + + while (len > sizeof(struct ath10k_fw_ie)) { + hdr = (struct ath10k_fw_ie *)data; + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data = hdr->data; + + if (len < ALIGN(ie_len, 4)) { + ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n", + ie_id, ie_len, len); + return -EINVAL; + } + + switch (ie_id) { + case ATH10K_BD_IE_BOARD: + ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, + boardname); + if (ret == -ENOENT) + /* no match found, continue */ + break; + + /* either found or error, so stop searching */ + goto out; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + len -= ie_len; + data += ie_len; + } + +out: + /* return result of parse_bd_ie_board() or -ENOENT */ + return ret; +} + static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, const char *boardname, + const char *fallback_boardname, const char *filename) { - size_t len, magic_len, ie_len; - struct ath10k_fw_ie *hdr; + size_t len, magic_len; const u8 *data; - int ret, ie_id; + int ret; ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, @@ -1311,69 +1358,23 @@ static int 
ath10k_core_fetch_board_data_api_n(struct ath10k *ar, data += magic_len; len -= magic_len; - while (len > sizeof(struct ath10k_fw_ie)) { - hdr = (struct ath10k_fw_ie *)data; - ie_id = le32_to_cpu(hdr->id); - ie_len = le32_to_cpu(hdr->len); - - len -= sizeof(*hdr); - data = hdr->data; - - if (len < ALIGN(ie_len, 4)) { - ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n", - ie_id, ie_len, len); - ret = -EINVAL; - goto err; - } + /* attempt to find boardname in the IE list */ + ret = ath10k_core_search_bd(ar, boardname, data, len); - switch (ie_id) { - case ATH10K_BD_IE_BOARD: - ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, - boardname); - if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') { - /* try default bdf if variant was not found */ - char *s, *v = ",variant="; - char boardname2[100]; - - strlcpy(boardname2, boardname, - sizeof(boardname2)); - - s = strstr(boardname2, v); - if (s) - *s = '\0'; /* strip ",variant=%s" */ + /* if we didn't find it and have a fallback name, try that */ + if (ret == -ENOENT && fallback_boardname) + ret = ath10k_core_search_bd(ar, fallback_boardname, data, len); - ret = ath10k_core_parse_bd_ie_board(ar, data, - ie_len, - boardname2); - } - - if (ret == -ENOENT) - /* no match found, continue */ - break; - else if (ret) - /* there was an error, bail out */ - goto err; - - /* board data found */ - goto out; - } - - /* jump over the padding */ - ie_len = ALIGN(ie_len, 4); - - len -= ie_len; - data += ie_len; - } - -out: - if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) { + if (ret == -ENOENT) { ath10k_err(ar, "failed to fetch board data for %s from %s/%s\n", boardname, ar->hw_params.fw.dir, filename); ret = -ENODATA; - goto err; } + if (ret) + goto err; + return 0; err: @@ -1382,12 +1383,12 @@ err: } static int ath10k_core_create_board_name(struct ath10k *ar, char *name, - size_t name_len) + size_t name_len, bool with_variant) { /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; - if (ar->id.bdf_ext[0] != '\0') + if (with_variant && ar->id.bdf_ext[0] != '\0') scnprintf(variant, sizeof(variant), ",variant=%s", ar->id.bdf_ext); @@ -1413,17 +1414,26 @@ out: static int ath10k_core_fetch_board_file(struct ath10k *ar) { - char boardname[100]; + char boardname[100], fallback_boardname[100]; int ret; - ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname)); + ret = ath10k_core_create_board_name(ar, boardname, + sizeof(boardname), true); if (ret) { ath10k_err(ar, "failed to create board name: %d", ret); return ret; } + ret = ath10k_core_create_board_name(ar, fallback_boardname, + sizeof(boardname), false); + if (ret) { + ath10k_err(ar, "failed to create fallback board name: %d", ret); + return ret; + } + ar->bd_api = 2; ret = ath10k_core_fetch_board_data_api_n(ar, boardname, + fallback_boardname, ATH10K_BOARD_API2_FILE); if (!ret) goto success; -- cgit v1.2.3 From 5df6e1313044bf0b5326fa54db8a3904d43ec1dc Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Sun, 15 Apr 2018 14:22:27 +0200 Subject: ath10k: add inlined wrappers for htt tx ops These wrappers makes the HTT ops align better with the HIF ops (where similar wrappers are used). It also makes it easier for a target to have unsupported ops (by letting the corresponding function pointer be NULL). 
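
To make the call-site change concrete, here is a tiny self-contained sketch of the wrapper idiom: int-returning ops degrade to -EOPNOTSUPP when the pointer is NULL, void ops become silent no-ops, and callers stop dereferencing the ops table directly. The demo_* names are invented for the example and only mirror the shape of the real htt_tx ops, not their signatures.

#include <errno.h>
#include <stdio.h>

struct demo_htt;

struct demo_htt_tx_ops {
	int  (*send_rx_ring_cfg)(struct demo_htt *htt);	/* may be NULL */
	void (*free_txbuff)(struct demo_htt *htt);		/* may be NULL */
};

struct demo_htt {
	const struct demo_htt_tx_ops *tx_ops;
};

/* int-returning op: a missing implementation becomes -EOPNOTSUPP */
static inline int demo_htt_send_rx_ring_cfg(struct demo_htt *htt)
{
	if (!htt->tx_ops->send_rx_ring_cfg)
		return -EOPNOTSUPP;
	return htt->tx_ops->send_rx_ring_cfg(htt);
}

/* void op: a missing implementation is simply a no-op */
static inline void demo_htt_free_txbuff(struct demo_htt *htt)
{
	if (htt->tx_ops->free_txbuff)
		htt->tx_ops->free_txbuff(htt);
}

int main(void)
{
	static const struct demo_htt_tx_ops sparse_ops = { 0 };	/* target without these ops */
	struct demo_htt htt = { .tx_ops = &sparse_ops };

	/* the old style, htt.tx_ops->send_rx_ring_cfg(&htt), would crash here;
	 * the wrapper degrades gracefully instead */
	printf("rx ring cfg: %d\n", demo_htt_send_rx_ring_cfg(&htt));
	demo_htt_free_txbuff(&htt);	/* silently does nothing */
	return 0;
}
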
Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.c | 4 +-- drivers/net/wireless/ath/ath10k/htt.h | 51 ++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/htt_tx.c | 12 ++++---- drivers/net/wireless/ath/ath10k/mac.c | 2 +- 4 files changed, 60 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 625198dea18b..21a67f82f037 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -257,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt) return status; } - status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt); + status = ath10k_htt_send_frag_desc_bank_cfg(htt); if (status) return status; - status = htt->tx_ops->htt_send_rx_ring_cfg(htt); + status = ath10k_htt_send_rx_ring_cfg(htt); if (status) { ath10k_warn(ar, "failed to setup rx ring: %d\n", status); diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index e5dbb0bdd787..68f3bc98fae7 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1856,6 +1856,57 @@ struct ath10k_htt_tx_ops { void (*htt_free_txbuff)(struct ath10k_htt *htt); }; +static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_send_rx_ring_cfg) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_send_rx_ring_cfg(htt); +} + +static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_send_frag_desc_bank_cfg) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt); +} + +static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_alloc_frag_desc) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_alloc_frag_desc(htt); +} + +static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt) +{ + if (htt->tx_ops->htt_free_frag_desc) + htt->tx_ops->htt_free_frag_desc(htt); +} + +static inline int ath10k_htt_tx(struct ath10k_htt *htt, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) +{ + return htt->tx_ops->htt_tx(htt, txmode, msdu); +} + +static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_alloc_txbuff) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_alloc_txbuff(htt); +} + +static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt) +{ + if (htt->tx_ops->htt_free_txbuff) + htt->tx_ops->htt_free_txbuff(htt); +} + struct ath10k_htt_rx_ops { size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt); void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr); diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index d334b7be1fea..a086958c39b6 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -443,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt) struct ath10k *ar = htt->ar; int ret; - ret = htt->tx_ops->htt_alloc_txbuff(htt); + ret = ath10k_htt_alloc_txbuff(htt); if (ret) { ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret); return ret; } - ret = htt->tx_ops->htt_alloc_frag_desc(htt); + ret = ath10k_htt_alloc_frag_desc(htt); if (ret) { ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret); goto free_txbuf; @@ -473,10 +473,10 @@ free_txq: ath10k_htt_tx_free_txq(htt); free_frag_desc: - htt->tx_ops->htt_free_frag_desc(htt); + ath10k_htt_free_frag_desc(htt); 
free_txbuf: - htt->tx_ops->htt_free_txbuff(htt); + ath10k_htt_free_txbuff(htt); return ret; } @@ -530,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt) if (!htt->tx_mem_allocated) return; - htt->tx_ops->htt_free_txbuff(htt); + ath10k_htt_free_txbuff(htt); ath10k_htt_tx_free_txq(htt); - htt->tx_ops->htt_free_frag_desc(htt); + ath10k_htt_free_frag_desc(htt); ath10k_htt_tx_free_txdone_fifo(htt); htt->tx_mem_allocated = false; } diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index bf05a3689558..fc3320f94f45 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3598,7 +3598,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar, switch (txpath) { case ATH10K_MAC_TX_HTT: - ret = htt->tx_ops->htt_tx(htt, txmode, skb); + ret = ath10k_htt_tx(htt, txmode, skb); break; case ATH10K_MAC_TX_HTT_MGMT: ret = ath10k_htt_mgmt_tx(htt, skb); -- cgit v1.2.3 From 9a5511d5f24f331c1709f398b1cff5d538fbae1d Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Sun, 15 Apr 2018 14:22:28 +0200 Subject: ath10k: add inlined wrappers for htt rx ops Added for the same reason as the TX wrappers. Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 37 ++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/htt_rx.c | 14 ++++++------ 2 files changed, 44 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 68f3bc98fae7..4e5fe539eb77 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1916,6 +1916,43 @@ struct ath10k_htt_rx_ops { void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx); }; +static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt) +{ + if (!htt->rx_ops->htt_get_rx_ring_size) + return 0; + + return htt->rx_ops->htt_get_rx_ring_size(htt); +} + +static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt, + void *vaddr) +{ + if (htt->rx_ops->htt_config_paddrs_ring) + htt->rx_ops->htt_config_paddrs_ring(htt, vaddr); +} + +static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt, + dma_addr_t paddr, + int idx) +{ + if (htt->rx_ops->htt_set_paddrs_ring) + htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx); +} + +static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt) +{ + if (!htt->rx_ops->htt_get_vaddr_ring) + return NULL; + + return htt->rx_ops->htt_get_vaddr_ring(htt); +} + +static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx) +{ + if (htt->rx_ops->htt_reset_paddrs_ring) + htt->rx_ops->htt_reset_paddrs_ring(htt, idx); +} + #define RX_HTT_HDR_STATUS_LEN 64 /* This structure layout is programmed via rx ring setup diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 0315bdad7f85..bd23f6940488 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -182,7 +182,7 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) rxcb = ATH10K_SKB_RXCB(skb); rxcb->paddr = paddr; htt->rx_ring.netbufs_ring[idx] = skb; - htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx); + ath10k_htt_set_paddrs_ring(htt, paddr, idx); htt->rx_ring.fill_cnt++; if (htt->rx_ring.in_ord_rx) { @@ -287,8 +287,8 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) ath10k_htt_rx_ring_free(htt); dma_free_coherent(htt->ar->dev, - 
htt->rx_ops->htt_get_rx_ring_size(htt), - htt->rx_ops->htt_get_vaddr_ring(htt), + ath10k_htt_get_rx_ring_size(htt), + ath10k_htt_get_vaddr_ring(htt), htt->rx_ring.base_paddr); dma_free_coherent(htt->ar->dev, @@ -315,7 +315,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) idx = htt->rx_ring.sw_rd_idx.msdu_payld; msdu = htt->rx_ring.netbufs_ring[idx]; htt->rx_ring.netbufs_ring[idx] = NULL; - htt->rx_ops->htt_reset_paddrs_ring(htt, idx); + ath10k_htt_reset_paddrs_ring(htt, idx); idx++; idx &= htt->rx_ring.size_mask; @@ -587,13 +587,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) if (!htt->rx_ring.netbufs_ring) goto err_netbuf; - size = htt->rx_ops->htt_get_rx_ring_size(htt); + size = ath10k_htt_get_rx_ring_size(htt); vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); if (!vaddr_ring) goto err_dma_ring; - htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring); + ath10k_htt_config_paddrs_ring(htt, vaddr_ring); htt->rx_ring.base_paddr = paddr; vaddr = dma_alloc_coherent(htt->ar->dev, @@ -627,7 +627,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) err_dma_idx: dma_free_coherent(htt->ar->dev, - htt->rx_ops->htt_get_rx_ring_size(htt), + ath10k_htt_get_rx_ring_size(htt), vaddr_ring, htt->rx_ring.base_paddr); err_dma_ring: -- cgit v1.2.3 From 6da2b2d46ff937edc3ecea0a36d92252981d033a Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Mon, 16 Apr 2018 09:56:45 +0530 Subject: ath10k: fix fw path name for WCN3990 target FW path is mapped incorrectly for the WCN3990 hw version. Fix fw path with correct hw1.0 name. Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/hw.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 3041eba61e54..b025a1bf2fde 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -131,7 +131,7 @@ enum qca9377_chip_id_rev { /* WCN3990 1.0 definitions */ #define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990 -#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw3.0" +#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw1.0" #define ATH10K_FW_FILE_BASE "firmware" #define ATH10K_FW_API_MAX 6 -- cgit v1.2.3 From 9ff6fde86fd27a0c733c01114eb0d46bd3a33fb7 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 10 Apr 2018 19:35:58 +0200 Subject: wcn36xx: use READ_ONCE() to access desc->ctrl When accessing shared memory to check for the stat of submitted descriptors, make sure to use READ_ONCE(). This will guarantee the compiler treats these memory locations as volatile and doesn't apply any caching. While this doesn't fix any particular problem I ran into, it's best practice to do it this way. Note that this patch also removes the superflous extra condition check in the do-while loop in reap_tx_dxes(), as the loop will break instantly anyway in that case. 
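
The point of the annotation is that desc->ctrl is written by the DXE hardware, so each pass of the polling loop must issue a fresh load rather than let the compiler keep the value in a register. A minimal user-space sketch of the idiom follows; DEMO_READ_ONCE is a simplified stand-in for the kernel's READ_ONCE(), and the descriptor layout and bit value are invented for the example.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's READ_ONCE(): the volatile cast
 * forces a real load from memory on every access */
#define DEMO_READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

#define DEMO_CTRL_VLD 0x00000001u	/* "descriptor still owned by hardware" */

struct demo_dxe_desc {
	uint32_t ctrl;		/* updated by the DMA engine */
	uint32_t dst_addr_l;
};

/* Reap completed descriptors: stop at the first one hardware still owns.
 * Thanks to DEMO_READ_ONCE, ctrl is re-read from memory on every pass. */
static int demo_reap(struct demo_dxe_desc *ring, int n)
{
	int done = 0;

	for (int i = 0; i < n; i++) {
		if (DEMO_READ_ONCE(ring[i].ctrl) & DEMO_CTRL_VLD)
			break;
		done++;
	}
	return done;
}

int main(void)
{
	struct demo_dxe_desc ring[4] = {
		{ .ctrl = 0 }, { .ctrl = 0 },
		{ .ctrl = DEMO_CTRL_VLD }, { .ctrl = 0 },
	};

	printf("completed descriptors: %d\n", demo_reap(ring, 4));	/* prints 2 */
	return 0;
}
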
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index a6702ada13d3..bd2b946a65c9 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -368,7 +368,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) spin_lock_irqsave(&ch->lock, flags); ctl = ch->tail_blk_ctl; do { - if (ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD) + if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD) break; if (ctl->skb) { dma_unmap_single(wcn->dev, ctl->desc->src_addr_l, @@ -387,8 +387,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) ctl->skb = NULL; } ctl = ctl->next; - } while (ctl != ch->head_blk_ctl && - !(ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD)); + } while (ctl != ch->head_blk_ctl); ch->tail_blk_ctl = ctl; spin_unlock_irqrestore(&ch->lock, flags); @@ -530,7 +529,7 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, int_mask = WCN36XX_DXE_INT_CH3_MASK; } - while (!(dxe->ctrl & WCN36xx_DXE_CTRL_VLD)) { + while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) { skb = ctl->skb; dma_addr = dxe->dst_addr_l; ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC); -- cgit v1.2.3 From 292eba02dbb4c41da840e462e51ee97d80d873d1 Mon Sep 17 00:00:00 2001 From: Zhao Chen Date: Wed, 18 Apr 2018 06:07:39 -0400 Subject: net-next/hinic: add arm64 support This patch enables arm64 platform support for the HINIC driver. Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig index 08db24954f7e..e4e8b24c1a5d 100644 --- a/drivers/net/ethernet/huawei/hinic/Kconfig +++ b/drivers/net/ethernet/huawei/hinic/Kconfig @@ -4,7 +4,7 @@ config HINIC tristate "Huawei Intelligent PCIE Network Interface Card" - depends on (PCI_MSI && X86) + depends on (PCI_MSI && (X86 || ARM64)) ---help--- This driver supports HiNIC PCIE Ethernet cards. To compile this driver as part of the kernel, choose Y here. -- cgit v1.2.3 From 93c2fb253d177a0b8f4f93592441f88c9b7d6245 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 18 Apr 2018 15:38:59 -0700 Subject: net/ipv6: Rename fib6_info struct elements Change the prefix for fib6_info struct elements from rt6i_ to fib6_. rt6i_pcpu and rt6i_exception_bucket are left as is given that they point to rt6_info entries. Rename only; not functional change intended. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 56 ++--- include/net/ip6_fib.h | 38 +-- include/net/ip6_route.h | 26 +- net/ipv6/addrconf.c | 24 +- net/ipv6/anycast.c | 8 +- net/ipv6/ip6_fib.c | 170 ++++++------- net/ipv6/ndisc.c | 2 +- net/ipv6/route.c | 272 ++++++++++----------- 8 files changed, 298 insertions(+), 298 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index b85b15851841..8e4edb634b11 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -4705,17 +4705,17 @@ static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt) * are trapped to the CPU, so no need to program specific routes * for them. */ - if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL) + if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) return true; /* Multicast routes aren't supported, so ignore them. Neighbour * Discovery packets are specifically trapped. */ - if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST) + if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST) return true; /* Cloned routes are irrelevant in the forwarding path. */ - if (rt->rt6i_flags & RTF_CACHE) + if (rt->fib6_flags & RTF_CACHE) return true; return false; @@ -4759,7 +4759,7 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt) { /* RTF_CACHE routes are ignored */ - return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; + return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; } static struct fib6_info * @@ -4784,16 +4784,16 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same * virtual router. */ - if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id) continue; - if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) break; - if (rt->rt6i_metric < nrt->rt6i_metric) + if (rt->fib6_metric < nrt->fib6_metric) continue; - if (rt->rt6i_metric == nrt->rt6i_metric && + if (rt->fib6_metric == nrt->fib6_metric && mlxsw_sp_fib6_rt_can_mp(rt)) return fib6_entry; - if (rt->rt6i_metric > nrt->rt6i_metric) + if (rt->fib6_metric > nrt->fib6_metric) break; } @@ -4899,7 +4899,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp, const struct fib6_info *rt) { - return rt->rt6i_flags & RTF_GATEWAY || + return rt->fib6_flags & RTF_GATEWAY || mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL); } @@ -5092,9 +5092,9 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, * local, which will cause them to be trapped with a lower * priority than packets that need to be locally received. 
*/ - if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) + if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; - else if (rt->rt6i_flags & RTF_REJECT) + else if (rt->fib6_flags & RTF_REJECT) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt)) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; @@ -5175,18 +5175,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); - if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id) continue; - if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) break; - if (replace && rt->rt6i_metric == nrt->rt6i_metric) { + if (replace && rt->fib6_metric == nrt->fib6_metric) { if (mlxsw_sp_fib6_rt_can_mp(rt) == mlxsw_sp_fib6_rt_can_mp(nrt)) return fib6_entry; if (mlxsw_sp_fib6_rt_can_mp(nrt)) fallback = fallback ?: fib6_entry; } - if (rt->rt6i_metric > nrt->rt6i_metric) + if (rt->fib6_metric > nrt->fib6_metric) return fallback ?: fib6_entry; } @@ -5215,7 +5215,7 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry, list_for_each_entry(last, &fib_node->entry_list, common.list) { struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last); - if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id) + if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id) break; fib6_entry = last; } @@ -5275,22 +5275,22 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib; struct mlxsw_sp_vr *vr; - vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id); + vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id); if (!vr) return NULL; fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6); - fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr, - sizeof(rt->rt6i_dst.addr), - rt->rt6i_dst.plen); + fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr, + sizeof(rt->fib6_dst.addr), + rt->fib6_dst.plen); if (!fib_node) return NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry); - if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id && - rt->rt6i_metric == iter_rt->rt6i_metric && + if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id && + rt->fib6_metric == iter_rt->fib6_metric && mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt)) return fib6_entry; } @@ -5325,16 +5325,16 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp->router->aborted) return 0; - if (rt->rt6i_src.plen) + if (rt->fib6_src.plen) return -EINVAL; if (mlxsw_sp_fib6_rt_should_ignore(rt)) return 0; - fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id, - &rt->rt6i_dst.addr, - sizeof(rt->rt6i_dst.addr), - rt->rt6i_dst.plen, + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id, + &rt->fib6_dst.addr, + sizeof(rt->fib6_dst.addr), + rt->fib6_dst.plen, MLXSW_SP_L3_PROTO_IPV6); if (IS_ERR(fib_node)) return PTR_ERR(fib_node); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index a36116b92100..994dd67207fb 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -134,34 +134,34 @@ struct fib6_nh { }; struct fib6_info { - struct fib6_table *rt6i_table; + struct fib6_table *fib6_table; struct fib6_info __rcu *rt6_next; - struct fib6_node __rcu *rt6i_node; + struct fib6_node __rcu 
*fib6_node; /* Multipath routes: * siblings is a list of fib6_info that have the the same metric/weight, * destination, but not the same gateway. nsiblings is just a cache * to speed up lookup. */ - struct list_head rt6i_siblings; - unsigned int rt6i_nsiblings; + struct list_head fib6_siblings; + unsigned int fib6_nsiblings; - atomic_t rt6i_ref; - struct inet6_dev *rt6i_idev; + atomic_t fib6_ref; + struct inet6_dev *fib6_idev; unsigned long expires; struct dst_metrics *fib6_metrics; #define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1] - struct rt6key rt6i_dst; - u32 rt6i_flags; - struct rt6key rt6i_src; - struct rt6key rt6i_prefsrc; + struct rt6key fib6_dst; + u32 fib6_flags; + struct rt6key fib6_src; + struct rt6key fib6_prefsrc; struct rt6_info * __percpu *rt6i_pcpu; struct rt6_exception_bucket __rcu *rt6i_exception_bucket; - u32 rt6i_metric; - u8 rt6i_protocol; + u32 fib6_metric; + u8 fib6_protocol; u8 fib6_type; u8 exception_bucket_flushed:1, should_flush:1, @@ -206,7 +206,7 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) static inline void fib6_clean_expires(struct fib6_info *f6i) { - f6i->rt6i_flags &= ~RTF_EXPIRES; + f6i->fib6_flags &= ~RTF_EXPIRES; f6i->expires = 0; } @@ -214,12 +214,12 @@ static inline void fib6_set_expires(struct fib6_info *f6i, unsigned long expires) { f6i->expires = expires; - f6i->rt6i_flags |= RTF_EXPIRES; + f6i->fib6_flags |= RTF_EXPIRES; } static inline bool fib6_check_expired(const struct fib6_info *f6i) { - if (f6i->rt6i_flags & RTF_EXPIRES) + if (f6i->fib6_flags & RTF_EXPIRES) return time_after(jiffies, f6i->expires); return false; } @@ -250,14 +250,14 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) * Return true if we can get cookie safely * Return false if not */ -static inline bool rt6_get_cookie_safe(const struct fib6_info *rt, +static inline bool rt6_get_cookie_safe(const struct fib6_info *f6i, u32 *cookie) { struct fib6_node *fn; bool status = false; rcu_read_lock(); - fn = rcu_dereference(rt->rt6i_node); + fn = rcu_dereference(f6i->fib6_node); if (fn) { *cookie = fn->fn_sernum; @@ -295,12 +295,12 @@ void fib6_info_destroy(struct fib6_info *f6i); static inline void fib6_info_hold(struct fib6_info *f6i) { - atomic_inc(&f6i->rt6i_ref); + atomic_inc(&f6i->fib6_ref); } static inline void fib6_info_release(struct fib6_info *f6i) { - if (f6i && atomic_dec_and_test(&f6i->rt6i_ref)) + if (f6i && atomic_dec_and_test(&f6i->fib6_ref)) fib6_info_destroy(f6i); } diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index d5fb1e4ae7ac..b4b85109984f 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -66,9 +66,9 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr) (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } -static inline bool rt6_qualify_for_ecmp(const struct fib6_info *rt) +static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i) { - return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == + return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == RTF_GATEWAY; } @@ -102,23 +102,23 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg); int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); -int ip6_ins_rt(struct net *net, struct fib6_info *rt); -int ip6_del_rt(struct net *net, struct fib6_info *rt); +int ip6_ins_rt(struct net *net, struct fib6_info *f6i); +int ip6_del_rt(struct net *net, struct fib6_info *f6i); -void rt6_flush_exceptions(struct fib6_info 
*rt); -void rt6_age_exceptions(struct fib6_info *rt, struct fib6_gc_args *gc_args, +void rt6_flush_exceptions(struct fib6_info *f6i); +void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args, unsigned long now); -static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *rt, +static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i, const struct in6_addr *daddr, unsigned int prefs, struct in6_addr *saddr) { - struct inet6_dev *idev = rt ? rt->rt6i_idev : NULL; + struct inet6_dev *idev = f6i ? f6i->fib6_idev : NULL; int err = 0; - if (rt && rt->rt6i_prefsrc.plen) - *saddr = rt->rt6i_prefsrc.addr; + if (f6i && f6i->fib6_prefsrc.plen) + *saddr = f6i->fib6_prefsrc.addr; else err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, daddr, prefs, saddr); @@ -176,14 +176,14 @@ struct rt6_rtnl_dump_arg { struct net *net; }; -int rt6_dump_route(struct fib6_info *rt, void *p_arg); +int rt6_dump_route(struct fib6_info *f6i, void *p_arg); void rt6_mtu_change(struct net_device *dev, unsigned int mtu); void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); void rt6_sync_up(struct net_device *dev, unsigned int nh_flags); void rt6_disable_ip(struct net_device *dev, unsigned long event); void rt6_sync_down_dev(struct net_device *dev, unsigned long event); -void rt6_multipath_rebalance(struct fib6_info *rt); +void rt6_multipath_rebalance(struct fib6_info *f6i); void rt6_uncached_list_add(struct rt6_info *rt); void rt6_uncached_list_del(struct rt6_info *rt); @@ -274,7 +274,7 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b) { return a->fib6_nh.nh_dev == b->fib6_nh.nh_dev && - a->rt6i_idev == b->rt6i_idev && + a->fib6_idev == b->fib6_idev && ipv6_addr_equal(&a->fib6_nh.nh_gw, &b->fib6_nh.nh_gw) && !lwtunnel_cmp_encap(a->fib6_nh.nh_lwtstate, b->fib6_nh.nh_lwtstate); } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index a294e86a9b25..f38ea829c28b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1178,19 +1178,19 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) static void cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt) { - struct fib6_info *rt; + struct fib6_info *f6i; - rt = addrconf_get_prefix_route(&ifp->addr, + f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev, 0, RTF_GATEWAY | RTF_DEFAULT); - if (rt) { + if (f6i) { if (del_rt) - ip6_del_rt(dev_net(ifp->idev->dev), rt); + ip6_del_rt(dev_net(ifp->idev->dev), f6i); else { - if (!(rt->rt6i_flags & RTF_EXPIRES)) - fib6_set_expires(rt, expires); - fib6_info_release(rt); + if (!(f6i->fib6_flags & RTF_EXPIRES)) + fib6_set_expires(f6i, expires); + fib6_info_release(f6i); } } } @@ -2370,9 +2370,9 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, for_each_fib6_node_rt_rcu(fn) { if (rt->fib6_nh.nh_dev->ifindex != dev->ifindex) continue; - if ((rt->rt6i_flags & flags) != flags) + if ((rt->fib6_flags & flags) != flags) continue; - if ((rt->rt6i_flags & noflags) != 0) + if ((rt->fib6_flags & noflags) != 0) continue; fib6_info_hold(rt); break; @@ -3341,11 +3341,11 @@ static int fixup_permanent_addr(struct net *net, struct inet6_dev *idev, struct inet6_ifaddr *ifp) { - /* !rt6i_node means the host route was removed from the + /* !fib6_node means the host route was removed from the * FIB, for example, if 'lo' device is 
taken down. In that * case regenerate the host route. */ - if (!ifp->rt || !ifp->rt->rt6i_node) { + if (!ifp->rt || !ifp->rt->fib6_node) { struct fib6_info *rt, *prev; rt = addrconf_dst_alloc(net, idev, &ifp->addr, false, @@ -5612,7 +5612,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) * our DAD process, so we don't need * to do it again */ - if (!rcu_access_pointer(ifp->rt->rt6i_node)) + if (!rcu_access_pointer(ifp->rt->fib6_node)) ip6_ins_rt(net, ifp->rt); if (ifp->idev->cnf.forwarding) addrconf_join_anycast(ifp); diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 0e35657501a7..eed3bf63bd05 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -218,10 +218,10 @@ static void aca_put(struct ifacaddr6 *ac) } } -static struct ifacaddr6 *aca_alloc(struct fib6_info *rt, +static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i, const struct in6_addr *addr) { - struct inet6_dev *idev = rt->rt6i_idev; + struct inet6_dev *idev = f6i->fib6_idev; struct ifacaddr6 *aca; aca = kzalloc(sizeof(*aca), GFP_ATOMIC); @@ -231,8 +231,8 @@ static struct ifacaddr6 *aca_alloc(struct fib6_info *rt, aca->aca_addr = *addr; in6_dev_hold(idev); aca->aca_idev = idev; - fib6_info_hold(rt); - aca->aca_rt = rt; + fib6_info_hold(f6i); + aca->aca_rt = f6i; aca->aca_users = 1; /* aca_tstamp should be updated upon changes */ aca->aca_cstamp = aca->aca_tstamp = jiffies; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 2ab49b7cac22..353f0b3e7b0d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -105,12 +105,12 @@ enum { FIB6_NO_SERNUM_CHANGE = 0, }; -void fib6_update_sernum(struct net *net, struct fib6_info *rt) +void fib6_update_sernum(struct net *net, struct fib6_info *f6i) { struct fib6_node *fn; - fn = rcu_dereference_protected(rt->rt6i_node, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + fn = rcu_dereference_protected(f6i->fib6_node, + lockdep_is_held(&f6i->fib6_table->tb6_lock)); if (fn) fn->fn_sernum = fib6_new_sernum(net); } @@ -159,10 +159,10 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags) return NULL; } - INIT_LIST_HEAD(&f6i->rt6i_siblings); + INIT_LIST_HEAD(&f6i->fib6_siblings); f6i->fib6_metrics = (struct dst_metrics *)&dst_default_metrics; - atomic_inc(&f6i->rt6i_ref); + atomic_inc(&f6i->fib6_ref); return f6i; } @@ -172,7 +172,7 @@ void fib6_info_destroy(struct fib6_info *f6i) struct rt6_exception_bucket *bucket; struct dst_metrics *m; - WARN_ON(f6i->rt6i_node); + WARN_ON(f6i->fib6_node); bucket = rcu_dereference_protected(f6i->rt6i_exception_bucket, 1); if (bucket) { @@ -197,8 +197,8 @@ void fib6_info_destroy(struct fib6_info *f6i) } } - if (f6i->rt6i_idev) - in6_dev_put(f6i->rt6i_idev); + if (f6i->fib6_idev) + in6_dev_put(f6i->fib6_idev); if (f6i->fib6_nh.nh_dev) dev_put(f6i->fib6_nh.nh_dev); @@ -401,7 +401,7 @@ static int call_fib6_entry_notifiers(struct net *net, .rt = rt, }; - rt->rt6i_table->fib_seq++; + rt->fib6_table->fib_seq++; return call_fib6_notifiers(net, event_type, &info.info); } @@ -483,10 +483,10 @@ static int fib6_dump_node(struct fib6_walker *w) * last sibling of this route (no need to dump the * sibling routes again) */ - if (rt->rt6i_nsiblings) - rt = list_last_entry(&rt->rt6i_siblings, + if (rt->fib6_nsiblings) + rt = list_last_entry(&rt->fib6_siblings, struct fib6_info, - rt6i_siblings); + fib6_siblings); } w->leaf = NULL; return 0; @@ -810,7 +810,7 @@ insert_above: RCU_INIT_POINTER(in->parent, pn); in->leaf = fn->leaf; atomic_inc(&rcu_dereference_protected(in->leaf, - lockdep_is_held(&table->tb6_lock))->rt6i_ref); + 
lockdep_is_held(&table->tb6_lock))->fib6_ref); /* update parent pointer */ if (dir) @@ -865,9 +865,9 @@ insert_above: static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn, struct net *net) { - struct fib6_table *table = rt->rt6i_table; + struct fib6_table *table = rt->fib6_table; - if (atomic_read(&rt->rt6i_ref) != 1) { + if (atomic_read(&rt->fib6_ref) != 1) { /* This route is used as dummy address holder in some split * nodes. It is not leaked, but it still holds other resources, * which must be released in time. So, scan ascendant nodes @@ -880,7 +880,7 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn, struct fib6_info *new_leaf; if (!(fn->fn_flags & RTN_RTINFO) && leaf == rt) { new_leaf = fib6_find_prefix(net, table, fn); - atomic_inc(&new_leaf->rt6i_ref); + atomic_inc(&new_leaf->fib6_ref); rcu_assign_pointer(fn->leaf, new_leaf); fib6_info_release(rt); @@ -919,7 +919,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, struct netlink_ext_ack *extack) { struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + lockdep_is_held(&rt->fib6_table->tb6_lock)); struct fib6_info *iter = NULL; struct fib6_info __rcu **ins; struct fib6_info __rcu **fallback_ins = NULL; @@ -939,12 +939,12 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, for (iter = leaf; iter; iter = rcu_dereference_protected(iter->rt6_next, - lockdep_is_held(&rt->rt6i_table->tb6_lock))) { + lockdep_is_held(&rt->fib6_table->tb6_lock))) { /* * Search for duplicates */ - if (iter->rt6i_metric == rt->rt6i_metric) { + if (iter->fib6_metric == rt->fib6_metric) { /* * Same priority level */ @@ -964,11 +964,11 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, } if (rt6_duplicate_nexthop(iter, rt)) { - if (rt->rt6i_nsiblings) - rt->rt6i_nsiblings = 0; - if (!(iter->rt6i_flags & RTF_EXPIRES)) + if (rt->fib6_nsiblings) + rt->fib6_nsiblings = 0; + if (!(iter->fib6_flags & RTF_EXPIRES)) return -EEXIST; - if (!(rt->rt6i_flags & RTF_EXPIRES)) + if (!(rt->fib6_flags & RTF_EXPIRES)) fib6_clean_expires(iter); else fib6_set_expires(iter, rt->expires); @@ -988,10 +988,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, */ if (rt_can_ecmp && rt6_qualify_for_ecmp(iter)) - rt->rt6i_nsiblings++; + rt->fib6_nsiblings++; } - if (iter->rt6i_metric > rt->rt6i_metric) + if (iter->fib6_metric > rt->fib6_metric) break; next_iter: @@ -1002,7 +1002,7 @@ next_iter: /* No ECMP-able route found, replace first non-ECMP one */ ins = fallback_ins; iter = rcu_dereference_protected(*ins, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + lockdep_is_held(&rt->fib6_table->tb6_lock)); found++; } @@ -1011,34 +1011,34 @@ next_iter: fn->rr_ptr = NULL; /* Link this route to others same route. 
*/ - if (rt->rt6i_nsiblings) { - unsigned int rt6i_nsiblings; + if (rt->fib6_nsiblings) { + unsigned int fib6_nsiblings; struct fib6_info *sibling, *temp_sibling; /* Find the first route that have the same metric */ sibling = leaf; while (sibling) { - if (sibling->rt6i_metric == rt->rt6i_metric && + if (sibling->fib6_metric == rt->fib6_metric && rt6_qualify_for_ecmp(sibling)) { - list_add_tail(&rt->rt6i_siblings, - &sibling->rt6i_siblings); + list_add_tail(&rt->fib6_siblings, + &sibling->fib6_siblings); break; } sibling = rcu_dereference_protected(sibling->rt6_next, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + lockdep_is_held(&rt->fib6_table->tb6_lock)); } /* For each sibling in the list, increment the counter of * siblings. BUG() if counters does not match, list of siblings * is broken! */ - rt6i_nsiblings = 0; + fib6_nsiblings = 0; list_for_each_entry_safe(sibling, temp_sibling, - &rt->rt6i_siblings, rt6i_siblings) { - sibling->rt6i_nsiblings++; - BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings); - rt6i_nsiblings++; + &rt->fib6_siblings, fib6_siblings) { + sibling->fib6_nsiblings++; + BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings); + fib6_nsiblings++; } - BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings); + BUG_ON(fib6_nsiblings != rt->fib6_nsiblings); rt6_multipath_rebalance(temp_sibling); } @@ -1059,8 +1059,8 @@ add: return err; rcu_assign_pointer(rt->rt6_next, iter); - atomic_inc(&rt->rt6i_ref); - rcu_assign_pointer(rt->rt6i_node, fn); + atomic_inc(&rt->fib6_ref); + rcu_assign_pointer(rt->fib6_node, fn); rcu_assign_pointer(*ins, rt); if (!info->skip_notify) inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); @@ -1087,8 +1087,8 @@ add: if (err) return err; - atomic_inc(&rt->rt6i_ref); - rcu_assign_pointer(rt->rt6i_node, fn); + atomic_inc(&rt->fib6_ref); + rcu_assign_pointer(rt->fib6_node, fn); rt->rt6_next = iter->rt6_next; rcu_assign_pointer(*ins, rt); if (!info->skip_notify) @@ -1097,8 +1097,8 @@ add: info->nl_net->ipv6.rt6_stats->fib_route_nodes++; fn->fn_flags |= RTN_RTINFO; } - nsiblings = iter->rt6i_nsiblings; - iter->rt6i_node = NULL; + nsiblings = iter->fib6_nsiblings; + iter->fib6_node = NULL; fib6_purge_rt(iter, fn, info->nl_net); if (rcu_access_pointer(fn->rr_ptr) == iter) fn->rr_ptr = NULL; @@ -1108,13 +1108,13 @@ add: /* Replacing an ECMP route, remove all siblings */ ins = &rt->rt6_next; iter = rcu_dereference_protected(*ins, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + lockdep_is_held(&rt->fib6_table->tb6_lock)); while (iter) { - if (iter->rt6i_metric > rt->rt6i_metric) + if (iter->fib6_metric > rt->fib6_metric) break; if (rt6_qualify_for_ecmp(iter)) { *ins = iter->rt6_next; - iter->rt6i_node = NULL; + iter->fib6_node = NULL; fib6_purge_rt(iter, fn, info->nl_net); if (rcu_access_pointer(fn->rr_ptr) == iter) fn->rr_ptr = NULL; @@ -1125,7 +1125,7 @@ add: ins = &iter->rt6_next; } iter = rcu_dereference_protected(*ins, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + lockdep_is_held(&rt->fib6_table->tb6_lock)); } WARN_ON(nsiblings != 0); } @@ -1137,7 +1137,7 @@ add: static void fib6_start_gc(struct net *net, struct fib6_info *rt) { if (!timer_pending(&net->ipv6.ip6_fib_timer) && - (rt->rt6i_flags & RTF_EXPIRES)) + (rt->fib6_flags & RTF_EXPIRES)) mod_timer(&net->ipv6.ip6_fib_timer, jiffies + net->ipv6.sysctl.ip6_rt_gc_interval); } @@ -1152,15 +1152,15 @@ void fib6_force_start_gc(struct net *net) static void __fib6_update_sernum_upto_root(struct fib6_info *rt, int sernum) { - struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node, - 
lockdep_is_held(&rt->rt6i_table->tb6_lock)); + struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, + lockdep_is_held(&rt->fib6_table->tb6_lock)); /* paired with smp_rmb() in rt6_get_cookie_safe() */ smp_wmb(); while (fn) { fn->fn_sernum = sernum; fn = rcu_dereference_protected(fn->parent, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + lockdep_is_held(&rt->fib6_table->tb6_lock)); } } @@ -1179,7 +1179,7 @@ void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt) int fib6_add(struct fib6_node *root, struct fib6_info *rt, struct nl_info *info, struct netlink_ext_ack *extack) { - struct fib6_table *table = rt->rt6i_table; + struct fib6_table *table = rt->fib6_table; struct fib6_node *fn, *pn = NULL; int err = -ENOMEM; int allow_create = 1; @@ -1196,8 +1196,8 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt, pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); fn = fib6_add_1(info->nl_net, table, root, - &rt->rt6i_dst.addr, rt->rt6i_dst.plen, - offsetof(struct fib6_info, rt6i_dst), allow_create, + &rt->fib6_dst.addr, rt->fib6_dst.plen, + offsetof(struct fib6_info, fib6_dst), allow_create, replace_required, extack); if (IS_ERR(fn)) { err = PTR_ERR(fn); @@ -1208,7 +1208,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt, pn = fn; #ifdef CONFIG_IPV6_SUBTREES - if (rt->rt6i_src.plen) { + if (rt->fib6_src.plen) { struct fib6_node *sn; if (!rcu_access_pointer(fn->subtree)) { @@ -1229,7 +1229,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt, if (!sfn) goto failure; - atomic_inc(&info->nl_net->ipv6.fib6_null_entry->rt6i_ref); + atomic_inc(&info->nl_net->ipv6.fib6_null_entry->fib6_ref); rcu_assign_pointer(sfn->leaf, info->nl_net->ipv6.fib6_null_entry); sfn->fn_flags = RTN_ROOT; @@ -1237,8 +1237,8 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt, /* Now add the first leaf node to new subtree */ sn = fib6_add_1(info->nl_net, table, sfn, - &rt->rt6i_src.addr, rt->rt6i_src.plen, - offsetof(struct fib6_info, rt6i_src), + &rt->fib6_src.addr, rt->fib6_src.plen, + offsetof(struct fib6_info, fib6_src), allow_create, replace_required, extack); if (IS_ERR(sn)) { @@ -1256,8 +1256,8 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt, rcu_assign_pointer(fn->subtree, sfn); } else { sn = fib6_add_1(info->nl_net, table, FIB6_SUBTREE(fn), - &rt->rt6i_src.addr, rt->rt6i_src.plen, - offsetof(struct fib6_info, rt6i_src), + &rt->fib6_src.addr, rt->fib6_src.plen, + offsetof(struct fib6_info, fib6_src), allow_create, replace_required, extack); if (IS_ERR(sn)) { @@ -1272,7 +1272,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt, rcu_assign_pointer(fn->leaf, info->nl_net->ipv6.fib6_null_entry); } else { - atomic_inc(&rt->rt6i_ref); + atomic_inc(&rt->fib6_ref); rcu_assign_pointer(fn->leaf, rt); } } @@ -1421,12 +1421,12 @@ struct fib6_node *fib6_lookup(struct fib6_node *root, const struct in6_addr *dad struct fib6_node *fn; struct lookup_args args[] = { { - .offset = offsetof(struct fib6_info, rt6i_dst), + .offset = offsetof(struct fib6_info, fib6_dst), .addr = daddr, }, #ifdef CONFIG_IPV6_SUBTREES { - .offset = offsetof(struct fib6_info, rt6i_src), + .offset = offsetof(struct fib6_info, fib6_src), .addr = saddr, }, #endif @@ -1511,7 +1511,7 @@ struct fib6_node *fib6_locate(struct fib6_node *root, struct fib6_node *fn; fn = fib6_locate_1(root, daddr, dst_len, - offsetof(struct fib6_info, rt6i_dst), + offsetof(struct fib6_info, fib6_dst), exact_match); #ifdef CONFIG_IPV6_SUBTREES @@ -1522,7 +1522,7 @@ struct fib6_node 
*fib6_locate(struct fib6_node *root, if (subtree) { fn = fib6_locate_1(subtree, saddr, src_len, - offsetof(struct fib6_info, rt6i_src), + offsetof(struct fib6_info, fib6_src), exact_match); } } @@ -1706,7 +1706,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, /* Unlink it */ *rtp = rt->rt6_next; - rt->rt6i_node = NULL; + rt->fib6_node = NULL; net->ipv6.rt6_stats->fib_rt_entries--; net->ipv6.rt6_stats->fib_discarded_routes++; @@ -1718,14 +1718,14 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, fn->rr_ptr = NULL; /* Remove this entry from other siblings */ - if (rt->rt6i_nsiblings) { + if (rt->fib6_nsiblings) { struct fib6_info *sibling, *next_sibling; list_for_each_entry_safe(sibling, next_sibling, - &rt->rt6i_siblings, rt6i_siblings) - sibling->rt6i_nsiblings--; - rt->rt6i_nsiblings = 0; - list_del_init(&rt->rt6i_siblings); + &rt->fib6_siblings, fib6_siblings) + sibling->fib6_nsiblings--; + rt->fib6_nsiblings = 0; + list_del_init(&rt->fib6_siblings); rt6_multipath_rebalance(next_sibling); } @@ -1765,9 +1765,9 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, /* Need to own table->tb6_lock */ int fib6_del(struct fib6_info *rt, struct nl_info *info) { - struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); - struct fib6_table *table = rt->rt6i_table; + struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, + lockdep_is_held(&rt->fib6_table->tb6_lock)); + struct fib6_table *table = rt->fib6_table; struct net *net = info->nl_net; struct fib6_info __rcu **rtp; struct fib6_info __rcu **rtp_next; @@ -1951,17 +1951,17 @@ static int fib6_clean_node(struct fib6_walker *w) #if RT6_DEBUG >= 2 pr_debug("%s: del failed: rt=%p@%p err=%d\n", __func__, rt, - rcu_access_pointer(rt->rt6i_node), + rcu_access_pointer(rt->fib6_node), res); #endif continue; } return 0; } else if (res == -2) { - if (WARN_ON(!rt->rt6i_nsiblings)) + if (WARN_ON(!rt->fib6_nsiblings)) continue; - rt = list_last_entry(&rt->rt6i_siblings, - struct fib6_info, rt6i_siblings); + rt = list_last_entry(&rt->fib6_siblings, + struct fib6_info, fib6_siblings); continue; } WARN_ON(res != 0); @@ -2045,7 +2045,7 @@ static int fib6_age(struct fib6_info *rt, void *arg) * Routes are expired even if they are in use. */ - if (rt->rt6i_flags & RTF_EXPIRES && rt->expires) { + if (rt->fib6_flags & RTF_EXPIRES && rt->expires) { if (time_after(now, rt->expires)) { RT6_TRACE("expiring %p\n", rt); return -1; @@ -2243,22 +2243,22 @@ static int ipv6_route_seq_show(struct seq_file *seq, void *v) struct ipv6_route_iter *iter = seq->private; const struct net_device *dev; - seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen); + seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen); #ifdef CONFIG_IPV6_SUBTREES - seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen); + seq_printf(seq, "%pi6 %02x ", &rt->fib6_src.addr, rt->fib6_src.plen); #else seq_puts(seq, "00000000000000000000000000000000 00 "); #endif - if (rt->rt6i_flags & RTF_GATEWAY) + if (rt->fib6_flags & RTF_GATEWAY) seq_printf(seq, "%pi6", &rt->fib6_nh.nh_gw); else seq_puts(seq, "00000000000000000000000000000000"); dev = rt->fib6_nh.nh_dev; seq_printf(seq, " %08x %08x %08x %08x %8s\n", - rt->rt6i_metric, atomic_read(&rt->rt6i_ref), 0, - rt->rt6i_flags, dev ? dev->name : ""); + rt->fib6_metric, atomic_read(&rt->fib6_ref), 0, + rt->fib6_flags, dev ? 
dev->name : ""); iter->w.leaf = NULL; return 0; } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 102645298692..9ac5366064e3 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1318,7 +1318,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) } neigh->flags |= NTF_ROUTER; } else if (rt) { - rt->rt6i_flags = (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); + rt->fib6_flags = (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); } if (rt) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f9c363327d62..e23ab0784e65 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -284,10 +284,10 @@ static const u32 ip6_template_metrics[RTAX_MAX] = { }; static const struct fib6_info fib6_null_entry_template = { - .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), - .rt6i_protocol = RTPROT_KERNEL, - .rt6i_metric = ~(u32)0, - .rt6i_ref = ATOMIC_INIT(1), + .fib6_flags = (RTF_REJECT | RTF_NONEXTHOP), + .fib6_protocol = RTPROT_KERNEL, + .fib6_metric = ~(u32)0, + .fib6_ref = ATOMIC_INIT(1), .fib6_type = RTN_UNREACHABLE, .fib6_metrics = (struct dst_metrics *)&dst_default_metrics, }; @@ -429,8 +429,8 @@ static struct fib6_info *rt6_multipath_select(const struct net *net, if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound)) return match; - list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings, - rt6i_siblings) { + list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings, + fib6_siblings) { int nh_upper_bound; nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound); @@ -472,12 +472,12 @@ static inline struct fib6_info *rt6_device_match(struct net *net, if (dev->ifindex == oif) return sprt; if (dev->flags & IFF_LOOPBACK) { - if (!sprt->rt6i_idev || - sprt->rt6i_idev->dev->ifindex != oif) { + if (!sprt->fib6_idev || + sprt->fib6_idev->dev->ifindex != oif) { if (flags & RT6_LOOKUP_F_IFACE) continue; if (local && - local->rt6i_idev->dev->ifindex == oif) + local->fib6_idev->dev->ifindex == oif) continue; } local = sprt; @@ -534,7 +534,7 @@ static void rt6_probe(struct fib6_info *rt) * Router Reachability Probe MUST be rate-limited * to no more than one per minute. 
*/ - if (!rt || !(rt->rt6i_flags & RTF_GATEWAY)) + if (!rt || !(rt->fib6_flags & RTF_GATEWAY)) return; nh_gw = &rt->fib6_nh.nh_gw; @@ -550,7 +550,7 @@ static void rt6_probe(struct fib6_info *rt) if (!(neigh->nud_state & NUD_VALID) && time_after(jiffies, neigh->updated + - rt->rt6i_idev->cnf.rtr_probe_interval)) { + rt->fib6_idev->cnf.rtr_probe_interval)) { work = kmalloc(sizeof(*work), GFP_ATOMIC); if (work) __neigh_set_probe_once(neigh); @@ -587,7 +587,7 @@ static inline int rt6_check_dev(struct fib6_info *rt, int oif) if (!oif || dev->ifindex == oif) return 2; if ((dev->flags & IFF_LOOPBACK) && - rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif) + rt->fib6_idev && rt->fib6_idev->dev->ifindex == oif) return 1; return 0; } @@ -597,8 +597,8 @@ static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt) enum rt6_nud_state ret = RT6_NUD_FAIL_HARD; struct neighbour *neigh; - if (rt->rt6i_flags & RTF_NONEXTHOP || - !(rt->rt6i_flags & RTF_GATEWAY)) + if (rt->fib6_flags & RTF_NONEXTHOP || + !(rt->fib6_flags & RTF_GATEWAY)) return RT6_NUD_SUCCEED; rcu_read_lock_bh(); @@ -632,7 +632,7 @@ static int rt6_score_route(struct fib6_info *rt, int oif, int strict) if (!m && (strict & RT6_LOOKUP_F_IFACE)) return RT6_NUD_FAIL_HARD; #ifdef CONFIG_IPV6_ROUTER_PREF - m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2; + m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->fib6_flags)) << 2; #endif if (strict & RT6_LOOKUP_F_REACHABLE) { int n = rt6_check_neigh(rt); @@ -648,7 +648,7 @@ static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict, { int m; bool match_do_rr = false; - struct inet6_dev *idev = rt->rt6i_idev; + struct inet6_dev *idev = rt->fib6_idev; if (rt->fib6_nh.nh_flags & RTNH_F_DEAD) goto out; @@ -694,7 +694,7 @@ static struct fib6_info *find_rr_leaf(struct fib6_node *fn, match = NULL; cont = NULL; for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) { - if (rt->rt6i_metric != metric) { + if (rt->fib6_metric != metric) { cont = rt; break; } @@ -704,7 +704,7 @@ static struct fib6_info *find_rr_leaf(struct fib6_node *fn, for (rt = leaf; rt && rt != rr_head; rt = rcu_dereference(rt->rt6_next)) { - if (rt->rt6i_metric != metric) { + if (rt->fib6_metric != metric) { cont = rt; break; } @@ -741,30 +741,30 @@ static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn, * (This might happen if all routes under fn are deleted from * the tree and fib6_repair_tree() is called on the node.) 
*/ - key_plen = rt0->rt6i_dst.plen; + key_plen = rt0->fib6_dst.plen; #ifdef CONFIG_IPV6_SUBTREES - if (rt0->rt6i_src.plen) - key_plen = rt0->rt6i_src.plen; + if (rt0->fib6_src.plen) + key_plen = rt0->fib6_src.plen; #endif if (fn->fn_bit != key_plen) return net->ipv6.fib6_null_entry; - match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict, + match = find_rr_leaf(fn, leaf, rt0, rt0->fib6_metric, oif, strict, &do_rr); if (do_rr) { struct fib6_info *next = rcu_dereference(rt0->rt6_next); /* no entries matched; do round-robin */ - if (!next || next->rt6i_metric != rt0->rt6i_metric) + if (!next || next->fib6_metric != rt0->fib6_metric) next = leaf; if (next != rt0) { - spin_lock_bh(&leaf->rt6i_table->tb6_lock); + spin_lock_bh(&leaf->fib6_table->tb6_lock); /* make sure next is not being deleted from the tree */ - if (next->rt6i_node) + if (next->fib6_node) rcu_assign_pointer(fn->rr_ptr, next); - spin_unlock_bh(&leaf->rt6i_table->tb6_lock); + spin_unlock_bh(&leaf->fib6_table->tb6_lock); } } @@ -773,7 +773,7 @@ static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn, static bool rt6_is_gw_or_nonexthop(const struct fib6_info *rt) { - return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)); + return (rt->fib6_flags & (RTF_NONEXTHOP | RTF_GATEWAY)); } #ifdef CONFIG_IPV6_ROUTE_INFO @@ -837,8 +837,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev, pref); else if (rt) - rt->rt6i_flags = RTF_ROUTEINFO | - (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); + rt->fib6_flags = RTF_ROUTEINFO | + (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); if (rt) { if (!addrconf_finite_timeout(lifetime)) @@ -861,13 +861,13 @@ static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt) { struct net_device *dev = rt->fib6_nh.nh_dev; - if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) { + if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) { /* for copies of local routes, dst->dev needs to be the * device if it is a master device, the master device if * device is enslaved, and the loopback as the default */ if (netif_is_l3_slave(dev) && - !rt6_need_strict(&rt->rt6i_dst.addr)) + !rt6_need_strict(&rt->fib6_dst.addr)) dev = l3mdev_master_dev_rcu(dev); else if (!netif_is_l3_master(dev)) dev = dev_net(dev)->loopback_dev; @@ -939,7 +939,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) { rt->dst.flags |= fib6_info_dst_flags(ort); - if (ort->rt6i_flags & RTF_REJECT) { + if (ort->fib6_flags & RTF_REJECT) { ip6_rt_init_dst_reject(rt, ort); return; } @@ -949,7 +949,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) if (ort->fib6_type == RTN_LOCAL) { rt->dst.input = ip6_input; - } else if (ipv6_addr_type(&ort->rt6i_dst.addr) & IPV6_ADDR_MULTICAST) { + } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { rt->dst.input = ip6_mc_input; } else { rt->dst.input = ip6_forward; @@ -979,17 +979,17 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) { ip6_rt_init_dst(rt, ort); - rt->rt6i_dst = ort->rt6i_dst; - rt->rt6i_idev = ort->rt6i_idev; + rt->rt6i_dst = ort->fib6_dst; + rt->rt6i_idev = ort->fib6_idev; if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); rt->rt6i_gateway = ort->fib6_nh.nh_gw; - rt->rt6i_flags = ort->rt6i_flags; + rt->rt6i_flags = ort->fib6_flags; rt6_set_from(rt, ort); #ifdef CONFIG_IPV6_SUBTREES - rt->rt6i_src = ort->rt6i_src; + rt->rt6i_src = ort->fib6_src; #endif - rt->rt6i_prefsrc = ort->rt6i_prefsrc; + 
rt->rt6i_prefsrc = ort->fib6_prefsrc; rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate); } @@ -1064,7 +1064,7 @@ restart: } else { f6i = rt6_device_match(net, f6i, &fl6->saddr, fl6->flowi6_oif, flags); - if (f6i->rt6i_nsiblings && fl6->flowi6_oif == 0) + if (f6i->fib6_nsiblings && fl6->flowi6_oif == 0) f6i = rt6_multipath_select(net, f6i, fl6, fl6->flowi6_oif, skb, flags); } @@ -1142,7 +1142,7 @@ static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info, int err; struct fib6_table *table; - table = rt->rt6i_table; + table = rt->fib6_table; spin_lock_bh(&table->tb6_lock); err = fib6_add(&table->tb6_root, rt, info, extack); spin_unlock_bh(&table->tb6_lock); @@ -1182,8 +1182,8 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort, rt->rt6i_dst.plen = 128; if (!rt6_is_gw_or_nonexthop(ort)) { - if (ort->rt6i_dst.plen != 128 && - ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) + if (ort->fib6_dst.plen != 128 && + ipv6_addr_equal(&ort->fib6_dst.addr, daddr)) rt->rt6i_flags |= RTF_ANYCAST; #ifdef CONFIG_IPV6_SUBTREES if (rt->rt6i_src.plen && saddr) { @@ -1375,7 +1375,7 @@ static unsigned int fib6_mtu(const struct fib6_info *rt) { unsigned int mtu; - mtu = rt->fib6_pmtu ? : rt->rt6i_idev->cnf.mtu6; + mtu = rt->fib6_pmtu ? : rt->fib6_idev->cnf.mtu6; mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); return mtu - lwtunnel_headroom(rt->fib6_nh.nh_lwtstate, mtu); @@ -1416,14 +1416,14 @@ static int rt6_insert_exception(struct rt6_info *nrt, * Otherwise, the exception table is indexed by * a hash of only rt6i_dst. */ - if (ort->rt6i_src.plen) + if (ort->fib6_src.plen) src_key = &nrt->rt6i_src.addr; #endif /* Update rt6i_prefsrc as it could be changed * in rt6_remove_prefsrc() */ - nrt->rt6i_prefsrc = ort->rt6i_prefsrc; + nrt->rt6i_prefsrc = ort->fib6_prefsrc; /* rt6_mtu_change() might lower mtu on ort. * Only insert this exception route if its mtu * is less than ort's mtu value. @@ -1457,9 +1457,9 @@ out: /* Update fn->fn_sernum to invalidate all cached dst */ if (!err) { - spin_lock_bh(&ort->rt6i_table->tb6_lock); + spin_lock_bh(&ort->fib6_table->tb6_lock); fib6_update_sernum(net, ort); - spin_unlock_bh(&ort->rt6i_table->tb6_lock); + spin_unlock_bh(&ort->fib6_table->tb6_lock); fib6_force_start_gc(net); } @@ -1514,7 +1514,7 @@ static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt, * Otherwise, the exception table is indexed by * a hash of only rt6i_dst. */ - if (rt->rt6i_src.plen) + if (rt->fib6_src.plen) src_key = saddr; #endif rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key); @@ -1551,7 +1551,7 @@ static int rt6_remove_exception_rt(struct rt6_info *rt) * Otherwise, the exception table is indexed by * a hash of only rt6i_dst. */ - if (from->rt6i_src.plen) + if (from->fib6_src.plen) src_key = &rt->rt6i_src.addr; #endif rt6_ex = __rt6_find_exception_spinlock(&bucket, @@ -1592,7 +1592,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt) * Otherwise, the exception table is indexed by * a hash of only rt6i_dst. 
*/ - if (from->rt6i_src.plen) + if (from->fib6_src.plen) src_key = &rt->rt6i_src.addr; #endif rt6_ex = __rt6_find_exception_rcu(&bucket, @@ -1810,7 +1810,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, redo_rt6_select: f6i = rt6_select(net, fn, oif, strict); - if (f6i->rt6i_nsiblings) + if (f6i->fib6_nsiblings) f6i = rt6_multipath_select(net, f6i, fl6, oif, skb, strict); if (f6i == net->ipv6.fib6_null_entry) { fn = fib6_backtrack(fn, &fl6->saddr); @@ -1842,7 +1842,7 @@ redo_rt6_select: trace_fib6_table_lookup(net, rt, table, fl6); return rt; } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) && - !(f6i->rt6i_flags & RTF_GATEWAY))) { + !(f6i->fib6_flags & RTF_GATEWAY))) { /* Create a RTF_CACHE clone which will not be * owned by the fib6 tree. It is for the special case where * the daddr in the skb during the neighbor look-up is different @@ -2206,7 +2206,7 @@ static void ip6_link_failure(struct sk_buff *skb) struct fib6_node *fn; rcu_read_lock(); - fn = rcu_dereference(rt->from->rt6i_node); + fn = rcu_dereference(rt->from->fib6_node); if (fn && (rt->rt6i_flags & RTF_DEFAULT)) fn->fn_sernum = -1; rcu_read_unlock(); @@ -2372,9 +2372,9 @@ restart: continue; if (fib6_check_expired(rt)) continue; - if (rt->rt6i_flags & RTF_REJECT) + if (rt->fib6_flags & RTF_REJECT) break; - if (!(rt->rt6i_flags & RTF_GATEWAY)) + if (!(rt->fib6_flags & RTF_GATEWAY)) continue; if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex) continue; @@ -2400,7 +2400,7 @@ restart: if (!rt) rt = net->ipv6.fib6_null_entry; - else if (rt->rt6i_flags & RTF_REJECT) { + else if (rt->fib6_flags & RTF_REJECT) { ret = net->ipv6.ip6_null_entry; goto out; } @@ -2907,7 +2907,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, if (cfg->fc_protocol == RTPROT_UNSPEC) cfg->fc_protocol = RTPROT_BOOT; - rt->rt6i_protocol = cfg->fc_protocol; + rt->fib6_protocol = cfg->fc_protocol; addr_type = ipv6_addr_type(&cfg->fc_dst); @@ -2922,17 +2922,17 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, rt->fib6_nh.nh_lwtstate = lwtstate_get(lwtstate); } - ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); - rt->rt6i_dst.plen = cfg->fc_dst_len; - if (rt->rt6i_dst.plen == 128) + ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); + rt->fib6_dst.plen = cfg->fc_dst_len; + if (rt->fib6_dst.plen == 128) rt->dst_host = true; #ifdef CONFIG_IPV6_SUBTREES - ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); - rt->rt6i_src.plen = cfg->fc_src_len; + ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len); + rt->fib6_src.plen = cfg->fc_src_len; #endif - rt->rt6i_metric = cfg->fc_metric; + rt->fib6_metric = cfg->fc_metric; rt->fib6_nh.nh_weight = 1; rt->fib6_type = cfg->fc_type; @@ -2958,7 +2958,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, goto out; } } - rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; + rt->fib6_flags = RTF_REJECT|RTF_NONEXTHOP; goto install_route; } @@ -2992,21 +2992,21 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, err = -EINVAL; goto out; } - rt->rt6i_prefsrc.addr = cfg->fc_prefsrc; - rt->rt6i_prefsrc.plen = 128; + rt->fib6_prefsrc.addr = cfg->fc_prefsrc; + rt->fib6_prefsrc.plen = 128; } else - rt->rt6i_prefsrc.plen = 0; + rt->fib6_prefsrc.plen = 0; - rt->rt6i_flags = cfg->fc_flags; + rt->fib6_flags = cfg->fc_flags; install_route: - if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) && + if (!(rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) && 
!netif_carrier_ok(dev)) rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN; rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK); rt->fib6_nh.nh_dev = dev; - rt->rt6i_idev = idev; - rt->rt6i_table = table; + rt->fib6_idev = idev; + rt->fib6_table = table; cfg->fc_nlinfo.nl_net = dev_net(dev); @@ -3048,7 +3048,7 @@ static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info) goto out; } - table = rt->rt6i_table; + table = rt->fib6_table; spin_lock_bh(&table->tb6_lock); err = fib6_del(rt, info); spin_unlock_bh(&table->tb6_lock); @@ -3075,10 +3075,10 @@ static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg) if (rt == net->ipv6.fib6_null_entry) goto out_put; - table = rt->rt6i_table; + table = rt->fib6_table; spin_lock_bh(&table->tb6_lock); - if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) { + if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) { struct fib6_info *sibling, *next_sibling; /* prefer to send a single notification with all hops */ @@ -3096,8 +3096,8 @@ static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg) } list_for_each_entry_safe(sibling, next_sibling, - &rt->rt6i_siblings, - rt6i_siblings) { + &rt->fib6_siblings, + fib6_siblings) { err = fib6_del(sibling, info); if (err) goto out_unlock; @@ -3176,9 +3176,9 @@ static int ip6_route_del(struct fib6_config *cfg, if (cfg->fc_flags & RTF_GATEWAY && !ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw)) continue; - if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) + if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric) continue; - if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol) + if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol) continue; fib6_info_hold(rt); rcu_read_unlock(); @@ -3336,7 +3336,7 @@ static struct fib6_info *rt6_get_route_info(struct net *net, for_each_fib6_node_rt_rcu(fn) { if (rt->fib6_nh.nh_dev->ifindex != ifindex) continue; - if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) + if ((rt->fib6_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) continue; if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr)) continue; @@ -3396,7 +3396,7 @@ struct fib6_info *rt6_get_dflt_router(struct net *net, rcu_read_lock(); for_each_fib6_node_rt_rcu(&table->tb6_root) { if (dev == rt->fib6_nh.nh_dev && - ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && + ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr)) break; } @@ -3445,8 +3445,8 @@ static void __rt6_purge_dflt_routers(struct net *net, restart: rcu_read_lock(); for_each_fib6_node_rt_rcu(&table->tb6_root) { - if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) && - (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) { + if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) && + (!rt->fib6_idev || rt->fib6_idev->cnf.accept_ra != 2)) { fib6_info_hold(rt); rcu_read_unlock(); ip6_del_rt(net, rt); @@ -3607,26 +3607,26 @@ struct fib6_info *addrconf_dst_alloc(struct net *net, rt->dst_nocount = true; in6_dev_hold(idev); - rt->rt6i_idev = idev; + rt->fib6_idev = idev; rt->dst_host = true; - rt->rt6i_protocol = RTPROT_KERNEL; - rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; + rt->fib6_protocol = RTPROT_KERNEL; + rt->fib6_flags = RTF_UP | RTF_NONEXTHOP; if (anycast) { rt->fib6_type = RTN_ANYCAST; - rt->rt6i_flags |= RTF_ANYCAST; + rt->fib6_flags |= RTF_ANYCAST; } else { rt->fib6_type = RTN_LOCAL; - rt->rt6i_flags |= RTF_LOCAL; + rt->fib6_flags |= RTF_LOCAL; } 
rt->fib6_nh.nh_gw = *addr; dev_hold(dev); rt->fib6_nh.nh_dev = dev; - rt->rt6i_dst.addr = *addr; - rt->rt6i_dst.plen = 128; + rt->fib6_dst.addr = *addr; + rt->fib6_dst.plen = 128; tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL; - rt->rt6i_table = fib6_get_table(net, tb_id); + rt->fib6_table = fib6_get_table(net, tb_id); return rt; } @@ -3646,10 +3646,10 @@ static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg) if (((void *)rt->fib6_nh.nh_dev == dev || !dev) && rt != net->ipv6.fib6_null_entry && - ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) { + ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) { spin_lock_bh(&rt6_exception_lock); /* remove prefsrc entry */ - rt->rt6i_prefsrc.plen = 0; + rt->fib6_prefsrc.plen = 0; /* need to update cache as well */ rt6_exceptions_remove_prefsrc(rt); spin_unlock_bh(&rt6_exception_lock); @@ -3675,7 +3675,7 @@ static int fib6_clean_tohost(struct fib6_info *rt, void *arg) { struct in6_addr *gateway = (struct in6_addr *)arg; - if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) && + if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) && ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) { return -1; } @@ -3707,16 +3707,16 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt) struct fib6_info *iter; struct fib6_node *fn; - fn = rcu_dereference_protected(rt->rt6i_node, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + fn = rcu_dereference_protected(rt->fib6_node, + lockdep_is_held(&rt->fib6_table->tb6_lock)); iter = rcu_dereference_protected(fn->leaf, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + lockdep_is_held(&rt->fib6_table->tb6_lock)); while (iter) { - if (iter->rt6i_metric == rt->rt6i_metric && + if (iter->fib6_metric == rt->fib6_metric && rt6_qualify_for_ecmp(iter)) return iter; iter = rcu_dereference_protected(iter->rt6_next, - lockdep_is_held(&rt->rt6i_table->tb6_lock)); + lockdep_is_held(&rt->fib6_table->tb6_lock)); } return NULL; @@ -3726,7 +3726,7 @@ static bool rt6_is_dead(const struct fib6_info *rt) { if (rt->fib6_nh.nh_flags & RTNH_F_DEAD || (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN && - rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) + rt->fib6_idev->cnf.ignore_routes_with_linkdown)) return true; return false; @@ -3740,7 +3740,7 @@ static int rt6_multipath_total_weight(const struct fib6_info *rt) if (!rt6_is_dead(rt)) total += rt->fib6_nh.nh_weight; - list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) { + list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) { if (!rt6_is_dead(iter)) total += iter->fib6_nh.nh_weight; } @@ -3767,7 +3767,7 @@ static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total) rt6_upper_bound_set(rt, &weight, total); - list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) rt6_upper_bound_set(iter, &weight, total); } @@ -3780,7 +3780,7 @@ void rt6_multipath_rebalance(struct fib6_info *rt) * then there is no need to rebalance upon the removal of every * sibling route. 
*/ - if (!rt->rt6i_nsiblings || rt->should_flush) + if (!rt->fib6_nsiblings || rt->should_flush) return; /* During lookup routes are evaluated in order, so we need to @@ -3831,7 +3831,7 @@ static bool rt6_multipath_uses_dev(const struct fib6_info *rt, if (rt->fib6_nh.nh_dev == dev) return true; - list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) if (iter->fib6_nh.nh_dev == dev) return true; @@ -3843,7 +3843,7 @@ static void rt6_multipath_flush(struct fib6_info *rt) struct fib6_info *iter; rt->should_flush = 1; - list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) iter->should_flush = 1; } @@ -3856,7 +3856,7 @@ static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt, if (rt->fib6_nh.nh_dev == down_dev || rt->fib6_nh.nh_flags & RTNH_F_DEAD) dead++; - list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) if (iter->fib6_nh.nh_dev == down_dev || iter->fib6_nh.nh_flags & RTNH_F_DEAD) dead++; @@ -3872,7 +3872,7 @@ static void rt6_multipath_nh_flags_set(struct fib6_info *rt, if (rt->fib6_nh.nh_dev == dev) rt->fib6_nh.nh_flags |= nh_flags; - list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) if (iter->fib6_nh.nh_dev == dev) iter->fib6_nh.nh_flags |= nh_flags; } @@ -3893,13 +3893,13 @@ static int fib6_ifdown(struct fib6_info *rt, void *p_arg) case NETDEV_DOWN: if (rt->should_flush) return -1; - if (!rt->rt6i_nsiblings) + if (!rt->fib6_nsiblings) return rt->fib6_nh.nh_dev == dev ? -1 : 0; if (rt6_multipath_uses_dev(rt, dev)) { unsigned int count; count = rt6_multipath_dead_count(rt, dev); - if (rt->rt6i_nsiblings + 1 == count) { + if (rt->fib6_nsiblings + 1 == count) { rt6_multipath_flush(rt); return -1; } @@ -3911,7 +3911,7 @@ static int fib6_ifdown(struct fib6_info *rt, void *p_arg) return -2; case NETDEV_CHANGE: if (rt->fib6_nh.nh_dev != dev || - rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) + rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) break; rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN; rt6_multipath_rebalance(rt); @@ -4188,10 +4188,10 @@ static void ip6_route_mpath_notify(struct fib6_info *rt, * nexthop. 
Since sibling routes are always added at the end of * the list, find the first sibling of the last route appended */ - if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) { - rt = list_first_entry(&rt_last->rt6i_siblings, + if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) { + rt = list_first_entry(&rt_last->fib6_siblings, struct fib6_info, - rt6i_siblings); + fib6_siblings); } if (rt) @@ -4410,13 +4410,13 @@ static size_t rt6_nlmsg_size(struct fib6_info *rt) { int nexthop_len = 0; - if (rt->rt6i_nsiblings) { + if (rt->fib6_nsiblings) { nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ + NLA_ALIGN(sizeof(struct rtnexthop)) + nla_total_size(16) /* RTA_GATEWAY */ + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate); - nexthop_len *= rt->rt6i_nsiblings; + nexthop_len *= rt->fib6_nsiblings; } return NLMSG_ALIGN(sizeof(struct rtmsg)) @@ -4444,11 +4444,11 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct fib6_info *rt, if (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN) { *flags |= RTNH_F_LINKDOWN; - if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) + if (rt->fib6_idev->cnf.ignore_routes_with_linkdown) *flags |= RTNH_F_DEAD; } - if (rt->rt6i_flags & RTF_GATEWAY) { + if (rt->fib6_flags & RTF_GATEWAY) { if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->fib6_nh.nh_gw) < 0) goto nla_put_failure; } @@ -4518,11 +4518,11 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, rtm = nlmsg_data(nlh); rtm->rtm_family = AF_INET6; - rtm->rtm_dst_len = rt->rt6i_dst.plen; - rtm->rtm_src_len = rt->rt6i_src.plen; + rtm->rtm_dst_len = rt->fib6_dst.plen; + rtm->rtm_src_len = rt->fib6_src.plen; rtm->rtm_tos = 0; - if (rt->rt6i_table) - table = rt->rt6i_table->tb6_id; + if (rt->fib6_table) + table = rt->fib6_table->tb6_id; else table = RT6_TABLE_UNSPEC; rtm->rtm_table = table; @@ -4532,9 +4532,9 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, rtm->rtm_type = rt->fib6_type; rtm->rtm_flags = 0; rtm->rtm_scope = RT_SCOPE_UNIVERSE; - rtm->rtm_protocol = rt->rt6i_protocol; + rtm->rtm_protocol = rt->fib6_protocol; - if (rt->rt6i_flags & RTF_CACHE) + if (rt->fib6_flags & RTF_CACHE) rtm->rtm_flags |= RTM_F_CLONED; if (dest) { @@ -4542,7 +4542,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; rtm->rtm_dst_len = 128; } else if (rtm->rtm_dst_len) - if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr)) + if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr)) goto nla_put_failure; #ifdef CONFIG_IPV6_SUBTREES if (src) { @@ -4550,12 +4550,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; rtm->rtm_src_len = 128; } else if (rtm->rtm_src_len && - nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr)) + nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr)) goto nla_put_failure; #endif if (iif) { #ifdef CONFIG_IPV6_MROUTE - if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { + if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) { int err = ip6mr_get_route(net, skb, rtm, portid); if (err == 0) @@ -4573,9 +4573,9 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; } - if (rt->rt6i_prefsrc.plen) { + if (rt->fib6_prefsrc.plen) { struct in6_addr saddr_buf; - saddr_buf = rt->rt6i_prefsrc.addr; + saddr_buf = rt->fib6_prefsrc.addr; if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) goto nla_put_failure; } @@ -4584,13 +4584,13 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, if (rtnetlink_put_metrics(skb, pmetrics) < 0) goto nla_put_failure; - if 
(nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric)) + if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric)) goto nla_put_failure; /* For multipath routes, walk the siblings list and add * each as a nexthop within RTA_MULTIPATH. */ - if (rt->rt6i_nsiblings) { + if (rt->fib6_nsiblings) { struct fib6_info *sibling, *next_sibling; struct nlattr *mp; @@ -4602,7 +4602,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; list_for_each_entry_safe(sibling, next_sibling, - &rt->rt6i_siblings, rt6i_siblings) { + &rt->fib6_siblings, fib6_siblings) { if (rt6_add_nexthop(skb, sibling) < 0) goto nla_put_failure; } @@ -4613,7 +4613,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; } - if (rt->rt6i_flags & RTF_EXPIRES) { + if (rt->fib6_flags & RTF_EXPIRES) { expires = dst ? dst->expires : rt->expires; expires -= jiffies; } @@ -4621,7 +4621,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0) goto nla_put_failure; - if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) + if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags))) goto nla_put_failure; @@ -4646,7 +4646,7 @@ int rt6_dump_route(struct fib6_info *rt, void *p_arg) /* user wants prefix routes only */ if (rtm->rtm_flags & RTM_F_PREFIX && - !(rt->rt6i_flags & RTF_PREFIX_RT)) { + !(rt->fib6_flags & RTF_PREFIX_RT)) { /* success since this is not a prefix route */ return 1; } @@ -4820,7 +4820,7 @@ static int ip6_route_dev_notify(struct notifier_block *this, if (event == NETDEV_REGISTER) { net->ipv6.fib6_null_entry->fib6_nh.nh_dev = dev; - net->ipv6.fib6_null_entry->rt6i_idev = in6_dev_get(dev); + net->ipv6.fib6_null_entry->fib6_idev = in6_dev_get(dev); net->ipv6.ip6_null_entry->dst.dev = dev; net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES @@ -4834,7 +4834,7 @@ static int ip6_route_dev_notify(struct notifier_block *this, /* NETDEV_UNREGISTER could be fired for multiple times by * netdev_wait_allrefs(). Make sure we only call this once. */ - in6_dev_put_clear(&net->ipv6.fib6_null_entry->rt6i_idev); + in6_dev_put_clear(&net->ipv6.fib6_null_entry->fib6_idev); in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev); @@ -5157,7 +5157,7 @@ void __init ip6_route_init_special_entries(void) * the loopback reference in rt6_info will not be taken, do it * manually for init_net */ init_net.ipv6.fib6_null_entry->fib6_nh.nh_dev = init_net.loopback_dev; - init_net.ipv6.fib6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); + init_net.ipv6.fib6_null_entry->fib6_idev = in6_dev_get(init_net.loopback_dev); init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES -- cgit v1.2.3 From 712e5a5ce774a1542a09687441be7be456fe8b62 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:49 +0200 Subject: net: phy: mdio-gpio: Fixup , which should be ; Seems like an old typo. Signed-off-by: Andrew Lunn Signed-off-by: David S.
Miller --- drivers/net/phy/mdio-gpio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 4333c6e14742..369f5f35d6fd 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -161,7 +161,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, if (!new_bus) goto out; - new_bus->name = "GPIO Bitbanged MDIO", + new_bus->name = "GPIO Bitbanged MDIO"; new_bus->phy_mask = pdata->phy_mask; new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask; -- cgit v1.2.3 From 9e4d60938a2b2aae3c2c213c923d9eb8d0a87ba2 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:50 +0200 Subject: net: phy: mdio-gpio: Remove reset function The platform data can contain a function to call to reset the bit banging interface. It is not used, so remove it. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 1 - include/linux/platform_data/mdio-gpio.h | 2 -- 2 files changed, 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 369f5f35d6fd..570b87b82abc 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -141,7 +141,6 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, goto out; bitbang->ctrl.ops = &mdio_gpio_ops; - bitbang->ctrl.reset = pdata->reset; mdc = pdata->mdc; bitbang->mdc = gpio_to_desc(mdc); if (pdata->mdc_active_low) diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index 11f00cdabe3d..6e8f01a570f2 100644 --- a/include/linux/platform_data/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h @@ -26,8 +26,6 @@ struct mdio_gpio_platform_data { u32 phy_mask; u32 phy_ignore_ta_mask; int irqs[PHY_MAX_ADDR]; - /* reset callback */ - int (*reset)(struct mii_bus *bus); }; #endif /* __LINUX_MDIO_GPIO_H */ -- cgit v1.2.3 From a3283e2576736463e07ddc684efb2e587a3bbda2 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:51 +0200 Subject: net: phy: mdio-bitbang: Remove reset support The mdio-gpio driver was the only user of the interface reset option. Since it no longer uses it, remove it from the bit banging code. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-bitbang.c | 9 --------- include/linux/mdio-bitbang.h | 2 -- 2 files changed, 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 403b085f0a89..15352f987bdf 100644 --- a/drivers/net/phy/mdio-bitbang.c +++ b/drivers/net/phy/mdio-bitbang.c @@ -205,14 +205,6 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) return 0; } -static int mdiobb_reset(struct mii_bus *bus) -{ - struct mdiobb_ctrl *ctrl = bus->priv; - if (ctrl->reset) - ctrl->reset(bus); - return 0; -} - struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) { struct mii_bus *bus; @@ -225,7 +217,6 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) bus->read = mdiobb_read; bus->write = mdiobb_write; - bus->reset = mdiobb_reset; bus->priv = ctrl; return bus; diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h index a8ac9cfa014c..5d71e8a8500f 100644 --- a/include/linux/mdio-bitbang.h +++ b/include/linux/mdio-bitbang.h @@ -33,8 +33,6 @@ struct mdiobb_ops { struct mdiobb_ctrl { const struct mdiobb_ops *ops; - /* reset callback */ - int (*reset)(struct mii_bus *bus); }; /* The returned bus is not yet registered with the phy layer. */ -- cgit v1.2.3 From c1b3eb04682f80af74572aa25601dbb555c6bc6e Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:52 +0200 Subject: net: phy: mdio-gpio: remove support for ignoring turn around This is not needed any more by devices using platform data, so remove it. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 1 - include/linux/platform_data/mdio-gpio.h | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 570b87b82abc..28ad9ca7b9e7 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -163,7 +163,6 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, new_bus->name = "GPIO Bitbanged MDIO"; new_bus->phy_mask = pdata->phy_mask; - new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask; memcpy(new_bus->irq, pdata->irqs, sizeof(new_bus->irq)); new_bus->parent = dev; diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index 6e8f01a570f2..b8f914a30126 100644 --- a/include/linux/platform_data/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h @@ -24,7 +24,6 @@ struct mdio_gpio_platform_data { bool mdo_active_low; u32 phy_mask; - u32 phy_ignore_ta_mask; int irqs[PHY_MAX_ADDR]; }; -- cgit v1.2.3 From 185a16b60a239f9db3fe8b8ce931a2fd61330853 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:53 +0200 Subject: net: phy: mdio-gpio: remove support for phy mask This is not needed any more by devices using platform data, so remove it. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-gpio.c | 4 ---- include/linux/platform_data/mdio-gpio.h | 1 - 2 files changed, 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 28ad9ca7b9e7..676ba0dd04be 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -162,13 +162,9 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, new_bus->name = "GPIO Bitbanged MDIO"; - new_bus->phy_mask = pdata->phy_mask; memcpy(new_bus->irq, pdata->irqs, sizeof(new_bus->irq)); new_bus->parent = dev; - if (new_bus->phy_mask == ~0) - goto out_free_bus; - for (i = 0; i < PHY_MAX_ADDR; i++) if (!new_bus->irq[i]) new_bus->irq[i] = PHY_POLL; diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index b8f914a30126..7d55dfef56dc 100644 --- a/include/linux/platform_data/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h @@ -23,7 +23,6 @@ struct mdio_gpio_platform_data { bool mdio_active_low; bool mdo_active_low; - u32 phy_mask; int irqs[PHY_MAX_ADDR]; }; -- cgit v1.2.3 From 68abb4f25d7eaf4d5f40108aa748621ddab68209 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:54 +0200 Subject: net: phy: mdio-gpio: Remove support for IRQs in platform data No current devices use IRQs in platform data, so remove support for it. The MDIO core will also initialise the new bus such that all addresses are polled, so remove the unneeded re-initialisation. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 7 ------- include/linux/platform_data/mdio-gpio.h | 2 -- 2 files changed, 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 676ba0dd04be..0f8c748c8edd 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -130,7 +130,6 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, { struct mii_bus *new_bus; struct mdio_gpio_info *bitbang; - int i; int mdc, mdio, mdo; unsigned long mdc_flags = GPIOF_OUT_INIT_LOW; unsigned long mdio_flags = GPIOF_DIR_IN; @@ -161,14 +160,8 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, goto out; new_bus->name = "GPIO Bitbanged MDIO"; - - memcpy(new_bus->irq, pdata->irqs, sizeof(new_bus->irq)); new_bus->parent = dev; - for (i = 0; i < PHY_MAX_ADDR; i++) - if (!new_bus->irq[i]) - new_bus->irq[i] = PHY_POLL; - if (bus_id != -1) snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); else diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index 7d55dfef56dc..af3be0c4ff9b 100644 --- a/include/linux/platform_data/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h @@ -22,8 +22,6 @@ struct mdio_gpio_platform_data { bool mdc_active_low; bool mdio_active_low; bool mdo_active_low; - - int irqs[PHY_MAX_ADDR]; }; #endif /* __LINUX_MDIO_GPIO_H */ -- cgit v1.2.3 From c82fc4814a93f36c9b536b5af8dd617e4ee1380f Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:55 +0200 Subject: net: phy: mdio-gpio: Swap to using gpio descriptors This simplifies the code, removing the need to handle active low flags, etc. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-gpio.c | 73 ++++++++------------------------- include/linux/platform_data/mdio-gpio.h | 10 ++--- 2 files changed, 20 insertions(+), 63 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 0f8c748c8edd..b999f32374e2 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -35,35 +35,25 @@ struct mdio_gpio_info { struct gpio_desc *mdc, *mdio, *mdo; }; -static void *mdio_gpio_of_get_data(struct platform_device *pdev) +static void *mdio_gpio_of_get_data(struct device *dev) { - struct device_node *np = pdev->dev.of_node; struct mdio_gpio_platform_data *pdata; - enum of_gpio_flags flags; - int ret; - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return NULL; - ret = of_get_gpio_flags(np, 0, &flags); - if (ret < 0) - return NULL; - - pdata->mdc = ret; - pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW; + pdata->mdc = devm_gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW); + if (IS_ERR(pdata->mdc)) + return ERR_CAST(pdata->mdc); - ret = of_get_gpio_flags(np, 1, &flags); - if (ret < 0) - return NULL; - pdata->mdio = ret; - pdata->mdio_active_low = flags & OF_GPIO_ACTIVE_LOW; + pdata->mdio = devm_gpiod_get_index(dev, NULL, 1, GPIOD_IN); + if (IS_ERR(pdata->mdio)) + return ERR_CAST(pdata->mdio); - ret = of_get_gpio_flags(np, 2, &flags); - if (ret > 0) { - pdata->mdo = ret; - pdata->mdo_active_low = flags & OF_GPIO_ACTIVE_LOW; - } + pdata->mdo = devm_gpiod_get_index_optional(dev, NULL, 2, GPIOD_OUT_LOW); + if (IS_ERR(pdata->mdo)) + return ERR_CAST(pdata->mdo); return pdata; } @@ -130,34 +120,19 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, { struct mii_bus *new_bus; struct mdio_gpio_info *bitbang; - int mdc, mdio, mdo; - unsigned long mdc_flags = GPIOF_OUT_INIT_LOW; - unsigned long mdio_flags = GPIOF_DIR_IN; - unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH; bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL); if (!bitbang) - goto out; + return NULL; bitbang->ctrl.ops = &mdio_gpio_ops; - mdc = pdata->mdc; - bitbang->mdc = gpio_to_desc(mdc); - if (pdata->mdc_active_low) - mdc_flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW; - mdio = pdata->mdio; - bitbang->mdio = gpio_to_desc(mdio); - if (pdata->mdio_active_low) - mdio_flags |= GPIOF_ACTIVE_LOW; - mdo = pdata->mdo; - if (mdo) { - bitbang->mdo = gpio_to_desc(mdo); - if (pdata->mdo_active_low) - mdo_flags = GPIOF_OUT_INIT_LOW | GPIOF_ACTIVE_LOW; - } + bitbang->mdc = pdata->mdc; + bitbang->mdio = pdata->mdio; + bitbang->mdo = pdata->mdo; new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) - goto out; + return NULL; new_bus->name = "GPIO Bitbanged MDIO"; new_bus->parent = dev; @@ -167,23 +142,9 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, else strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE); - if (devm_gpio_request_one(dev, mdc, mdc_flags, "mdc")) - goto out_free_bus; - - if (devm_gpio_request_one(dev, mdio, mdio_flags, "mdio")) - goto out_free_bus; - - if (mdo && devm_gpio_request_one(dev, mdo, mdo_flags, "mdo")) - goto out_free_bus; - dev_set_drvdata(dev, new_bus); return new_bus; - -out_free_bus: - free_mdio_bitbang(new_bus); -out: - return NULL; } static void mdio_gpio_bus_deinit(struct device *dev) @@ -208,7 +169,7 @@ static int mdio_gpio_probe(struct platform_device *pdev) int ret, bus_id; if (pdev->dev.of_node) { - pdata = mdio_gpio_of_get_data(pdev); + pdata = mdio_gpio_of_get_data(&pdev->dev); bus_id = 
of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); if (bus_id < 0) { dev_warn(&pdev->dev, "failed to get alias id\n"); diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index af3be0c4ff9b..bd91fa98a3aa 100644 --- a/include/linux/platform_data/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h @@ -15,13 +15,9 @@ struct mdio_gpio_platform_data { /* GPIO numbers for bus pins */ - unsigned int mdc; - unsigned int mdio; - unsigned int mdo; - - bool mdc_active_low; - bool mdio_active_low; - bool mdo_active_low; + struct gpio_desc *mdc; + struct gpio_desc *mdio; + struct gpio_desc *mdo; }; #endif /* __LINUX_MDIO_GPIO_H */ -- cgit v1.2.3 From fb766421d881a849c80c7cb80e8ccbbddeb55d9d Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:56 +0200 Subject: net: phy: mdio-gpio: Move allocation for bitbanging data Moving the allocation of this structure to the probe function is a step towards making it the core data structure of the driver. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index b999f32374e2..bb57a9e89a18 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -115,15 +115,11 @@ static const struct mdiobb_ops mdio_gpio_ops = { }; static struct mii_bus *mdio_gpio_bus_init(struct device *dev, + struct mdio_gpio_info *bitbang, struct mdio_gpio_platform_data *pdata, int bus_id) { struct mii_bus *new_bus; - struct mdio_gpio_info *bitbang; - - bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL); - if (!bitbang) - return NULL; bitbang->ctrl.ops = &mdio_gpio_ops; bitbang->mdc = pdata->mdc; @@ -165,9 +161,14 @@ static void mdio_gpio_bus_destroy(struct device *dev) static int mdio_gpio_probe(struct platform_device *pdev) { struct mdio_gpio_platform_data *pdata; + struct mdio_gpio_info *bitbang; struct mii_bus *new_bus; int ret, bus_id; + bitbang = devm_kzalloc(&pdev->dev, sizeof(*bitbang), GFP_KERNEL); + if (!bitbang) + return -ENOMEM; + if (pdev->dev.of_node) { pdata = mdio_gpio_of_get_data(&pdev->dev); bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); @@ -183,7 +184,7 @@ static int mdio_gpio_probe(struct platform_device *pdev) if (!pdata) return -ENODEV; - new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id); + new_bus = mdio_gpio_bus_init(&pdev->dev, bitbang, pdata, bus_id); if (!new_bus) return -ENODEV; -- cgit v1.2.3 From 4029ea3a8625bc699210623a106e887c2761fead Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:57 +0200 Subject: net: phy: mdio-gpio: Parse properties directly into bitbang structure The same parsing code can be used for both OF and platform devices, if the platform device uses a gpiod_lookup_table. Parse these properties directly into the bitbang structure, rather than use an intermediate platform data structure. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-gpio.c | 45 ++++++++++++++++----------------------------- 1 file changed, 16 insertions(+), 29 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index bb57a9e89a18..74b982835ac1 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -35,27 +35,20 @@ struct mdio_gpio_info { struct gpio_desc *mdc, *mdio, *mdo; }; -static void *mdio_gpio_of_get_data(struct device *dev) +static int mdio_gpio_get_data(struct device *dev, + struct mdio_gpio_info *bitbang) { - struct mdio_gpio_platform_data *pdata; + bitbang->mdc = devm_gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW); + if (IS_ERR(bitbang->mdc)) + return PTR_ERR(bitbang->mdc); - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - pdata->mdc = devm_gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW); - if (IS_ERR(pdata->mdc)) - return ERR_CAST(pdata->mdc); - - pdata->mdio = devm_gpiod_get_index(dev, NULL, 1, GPIOD_IN); - if (IS_ERR(pdata->mdio)) - return ERR_CAST(pdata->mdio); - - pdata->mdo = devm_gpiod_get_index_optional(dev, NULL, 2, GPIOD_OUT_LOW); - if (IS_ERR(pdata->mdo)) - return ERR_CAST(pdata->mdo); + bitbang->mdio = devm_gpiod_get_index(dev, NULL, 1, GPIOD_IN); + if (IS_ERR(bitbang->mdio)) + return PTR_ERR(bitbang->mdio); - return pdata; + bitbang->mdo = devm_gpiod_get_index_optional(dev, NULL, 2, + GPIOD_OUT_LOW); + return PTR_ERR_OR_ZERO(bitbang->mdo); } static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) @@ -116,15 +109,11 @@ static const struct mdiobb_ops mdio_gpio_ops = { static struct mii_bus *mdio_gpio_bus_init(struct device *dev, struct mdio_gpio_info *bitbang, - struct mdio_gpio_platform_data *pdata, int bus_id) { struct mii_bus *new_bus; bitbang->ctrl.ops = &mdio_gpio_ops; - bitbang->mdc = pdata->mdc; - bitbang->mdio = pdata->mdio; - bitbang->mdo = pdata->mdo; new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) @@ -160,7 +149,6 @@ static void mdio_gpio_bus_destroy(struct device *dev) static int mdio_gpio_probe(struct platform_device *pdev) { - struct mdio_gpio_platform_data *pdata; struct mdio_gpio_info *bitbang; struct mii_bus *new_bus; int ret, bus_id; @@ -169,22 +157,21 @@ static int mdio_gpio_probe(struct platform_device *pdev) if (!bitbang) return -ENOMEM; + ret = mdio_gpio_get_data(&pdev->dev, bitbang); + if (ret) + return ret; + if (pdev->dev.of_node) { - pdata = mdio_gpio_of_get_data(&pdev->dev); bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); if (bus_id < 0) { dev_warn(&pdev->dev, "failed to get alias id\n"); bus_id = 0; } } else { - pdata = dev_get_platdata(&pdev->dev); bus_id = pdev->id; } - if (!pdata) - return -ENODEV; - - new_bus = mdio_gpio_bus_init(&pdev->dev, bitbang, pdata, bus_id); + new_bus = mdio_gpio_bus_init(&pdev->dev, bitbang, bus_id); if (!new_bus) return -ENODEV; -- cgit v1.2.3 From fb78a95e22123da57cb79b98bdc62eb33965ca8a Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:58 +0200 Subject: net: phy: mdio-gpio: Add #defines for the GPIO index's The GPIOs are described in device tree using a list, without names. Add defines to indicate what each index in the list means. These defines should also be used by platform devices passing GPIOs via a GPIO lookup table. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-gpio.c | 10 +++++++--- include/linux/mdio-gpio.h | 9 +++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) create mode 100644 include/linux/mdio-gpio.h (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 74b982835ac1..281c905ef9fd 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include @@ -38,15 +40,17 @@ struct mdio_gpio_info { static int mdio_gpio_get_data(struct device *dev, struct mdio_gpio_info *bitbang) { - bitbang->mdc = devm_gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW); + bitbang->mdc = devm_gpiod_get_index(dev, NULL, MDIO_GPIO_MDC, + GPIOD_OUT_LOW); if (IS_ERR(bitbang->mdc)) return PTR_ERR(bitbang->mdc); - bitbang->mdio = devm_gpiod_get_index(dev, NULL, 1, GPIOD_IN); + bitbang->mdio = devm_gpiod_get_index(dev, NULL, MDIO_GPIO_MDIO, + GPIOD_IN); if (IS_ERR(bitbang->mdio)) return PTR_ERR(bitbang->mdio); - bitbang->mdo = devm_gpiod_get_index_optional(dev, NULL, 2, + bitbang->mdo = devm_gpiod_get_index_optional(dev, NULL, MDIO_GPIO_MDO, GPIOD_OUT_LOW); return PTR_ERR_OR_ZERO(bitbang->mdo); } diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h new file mode 100644 index 000000000000..cea443a672cb --- /dev/null +++ b/include/linux/mdio-gpio.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MDIO_GPIO_H +#define __LINUX_MDIO_GPIO_H + +#define MDIO_GPIO_MDC 0 +#define MDIO_GPIO_MDIO 1 +#define MDIO_GPIO_MDO 2 + +#endif -- cgit v1.2.3 From 0207dd1173fe31c153ffd439c4bb33d1341829b1 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:59 +0200 Subject: net: phy: mdio-gpio: Remove redundant platform data header The platform data header file is now unused. Remove it, but add an extra include which it brought in. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- MAINTAINERS | 1 - drivers/net/phy/mdio-gpio.c | 2 +- include/linux/platform_data/mdio-gpio.h | 23 ----------------------- 3 files changed, 1 insertion(+), 25 deletions(-) delete mode 100644 include/linux/platform_data/mdio-gpio.h (limited to 'drivers/net') diff --git a/MAINTAINERS b/MAINTAINERS index b60179d948bb..a7321687cae9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5321,7 +5321,6 @@ F: include/linux/*mdio*.h F: include/linux/of_net.h F: include/linux/phy.h F: include/linux/phy_fixed.h -F: include/linux/platform_data/mdio-gpio.h F: include/linux/platform_data/mdio-bcm-unimac.h F: include/trace/events/mdio.h F: include/uapi/linux/mdio.h diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 281c905ef9fd..b501221819e1 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h deleted file mode 100644 index bd91fa98a3aa..000000000000 --- a/include/linux/platform_data/mdio-gpio.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * MDIO-GPIO bus platform data structures - * - * Copyright (C) 2008, Paulius Zaleckas - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#ifndef __LINUX_MDIO_GPIO_H -#define __LINUX_MDIO_GPIO_H - -#include - -struct mdio_gpio_platform_data { - /* GPIO numbers for bus pins */ - struct gpio_desc *mdc; - struct gpio_desc *mdio; - struct gpio_desc *mdo; -}; - -#endif /* __LINUX_MDIO_GPIO_H */ -- cgit v1.2.3 From 31dd83b966410915a56358a8a3fc13c54d016fb3 Mon Sep 17 00:00:00 2001 From: Michael Schmitz Date: Thu, 19 Apr 2018 14:05:18 +1200 Subject: net-next: phy: new Asix Electronics PHY driver The Asix Electronics PHY found on the X-Surf 100 Amiga Zorro network card by Individual Computers is buggy, and needs the reset bit toggled as workaround to make a PHY soft reset succeed. Add workaround driver just for this special case. Suggested in xsurf100 patch series review by Andrew Lunn Signed-off-by: Michael Schmitz Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 6 +++++ drivers/net/phy/Makefile | 1 + drivers/net/phy/asix.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+) create mode 100644 drivers/net/phy/asix.c (limited to 'drivers/net') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index bdfbabb86ee0..edb8b9ab827f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -218,6 +218,12 @@ config AQUANTIA_PHY ---help--- Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 +config ASIX_PHY + tristate "Asix PHYs" + help + Currently supports the Asix Electronics PHY found in the X-Surf 100 + AX88796B package. + config AT803X_PHY tristate "AT803X PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 01acbcb2c798..701ca0b8717e 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -45,6 +45,7 @@ obj-y += $(sfp-obj-y) $(sfp-obj-m) obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o +obj-$(CONFIG_ASIX_PHY) += asix.o obj-$(CONFIG_AT803X_PHY) += at803x.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c new file mode 100644 index 000000000000..8ebe7f5484ae --- /dev/null +++ b/drivers/net/phy/asix.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Driver for Asix PHYs + * + * Author: Michael Schmitz + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#include +#include +#include +#include +#include +#include + +#define PHY_ID_ASIX_AX88796B 0x003b1841 + +MODULE_DESCRIPTION("Asix PHY driver"); +MODULE_AUTHOR("Michael Schmitz "); +MODULE_LICENSE("GPL"); + +/** + * asix_soft_reset - software reset the PHY via BMCR_RESET bit + * @phydev: target phy_device struct + * + * Description: Perform a software PHY reset using the standard + * BMCR_RESET bit and poll for the reset bit to be cleared. + * Toggle BMCR_RESET bit off to accommodate broken AX8796B PHY implementation + * such as used on the Individual Computers' X-Surf 100 Zorro card. 
+ * + * Returns: 0 on success, < 0 on failure + */ +static int asix_soft_reset(struct phy_device *phydev) +{ + int ret; + + /* Asix PHY won't reset unless reset bit toggles */ + ret = phy_write(phydev, MII_BMCR, 0); + if (ret < 0) + return ret; + + return genphy_soft_reset(phydev); +} + +static struct phy_driver asix_driver[] = { { + .phy_id = PHY_ID_ASIX_AX88796B, + .name = "Asix Electronics AX88796B", + .phy_id_mask = 0xfffffff0, + .features = PHY_BASIC_FEATURES, + .soft_reset = asix_soft_reset, +} }; + +module_phy_driver(asix_driver); + +static struct mdio_device_id __maybe_unused asix_tbl[] = { + { PHY_ID_ASIX_AX88796B, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, asix_tbl); -- cgit v1.2.3 From 7a0907bd399f6f2d40906c6d38ed1814d314ef74 Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Thu, 19 Apr 2018 14:05:19 +1200 Subject: net-next: ax88796: Fix MAC address reading To read the MAC address from the (virtual) SAprom, the remote DMA unit needs to be set up like for every other process access to card-local memory. Signed-off-by: Michael Karcher Signed-off-by: Michael Schmitz Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/ax88796.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index da61cf3cb3a9..ae3937535838 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -669,10 +669,16 @@ static int ax_init_dev(struct net_device *dev) if (ax->plat->flags & AXFLG_HAS_EEPROM) { unsigned char SA_prom[32]; + ei_outb(6, ioaddr + EN0_RCNTLO); + ei_outb(0, ioaddr + EN0_RCNTHI); + ei_outb(0, ioaddr + EN0_RSARLO); + ei_outb(0, ioaddr + EN0_RSARHI); + ei_outb(E8390_RREAD + E8390_START, ioaddr + NE_CMD); for (i = 0; i < sizeof(SA_prom); i += 2) { SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT); } + ei_outb(ENISR_RDC, ioaddr + EN0_ISR); /* Ack intr. */ if (ax->plat->wordlength == 2) for (i = 0; i < 16; i++) -- cgit v1.2.3 From fd5f375c162807b4e39676c540e26400c091cf4f Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Thu, 19 Apr 2018 14:05:20 +1200 Subject: net-next: ax88796: Attach MII bus only when open Call ax_mii_init in ax_open(), and unregister/remove mdiobus resources in ax_close(). This is needed to be able to unload the module, as the module is busy while the MII bus is attached. Signed-off-by: Michael Karcher Signed-off-by: Michael Schmitz Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/8390/ax88796.c | 183 +++++++++++++++++++----------------- 1 file changed, 95 insertions(+), 88 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index ae3937535838..ab020e601ec9 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -387,6 +387,90 @@ static void ax_phy_switch(struct net_device *dev, int on) ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17)); } +static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (level) + ax->reg_memr |= AX_MEMR_MDC; + else + ax->reg_memr &= ~AX_MEMR_MDC; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (output) + ax->reg_memr &= ~AX_MEMR_MDIR; + else + ax->reg_memr |= AX_MEMR_MDIR; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (value) + ax->reg_memr |= AX_MEMR_MDO; + else + ax->reg_memr &= ~AX_MEMR_MDO; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + int reg_memr = ei_inb(ax->addr_memr); + + return reg_memr & AX_MEMR_MDI ? 1 : 0; +} + +static const struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = ax_bb_mdc, + .set_mdio_dir = ax_bb_dir, + .set_mdio_data = ax_bb_set_data, + .get_mdio_data = ax_bb_get_data, +}; + +static int ax_mii_init(struct net_device *dev) +{ + struct platform_device *pdev = to_platform_device(dev->dev.parent); + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + int err; + + ax->bb_ctrl.ops = &bb_ops; + ax->addr_memr = ei_local->mem + AX_MEMR; + ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl); + if (!ax->mii_bus) { + err = -ENOMEM; + goto out; + } + + ax->mii_bus->name = "ax88796_mii_bus"; + ax->mii_bus->parent = dev->dev.parent; + snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + err = mdiobus_register(ax->mii_bus); + if (err) + goto out_free_mdio_bitbang; + + return 0; + + out_free_mdio_bitbang: + free_mdio_bitbang(ax->mii_bus); + out: + return err; +} + static int ax_open(struct net_device *dev) { struct ax_device *ax = to_ax_dev(dev); @@ -394,6 +478,10 @@ static int ax_open(struct net_device *dev) netdev_dbg(dev, "open\n"); + ret = ax_mii_init(dev); + if (ret) + goto failed_mii; + ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, dev->name, dev); if (ret) @@ -421,6 +509,10 @@ static int ax_open(struct net_device *dev) ax_phy_switch(dev, 0); free_irq(dev->irq, dev); failed_request_irq: + /* unregister mdiobus */ + mdiobus_unregister(ax->mii_bus); + free_mdio_bitbang(ax->mii_bus); + failed_mii: return ret; } @@ -440,6 +532,9 @@ static int ax_close(struct net_device *dev) phy_disconnect(dev->phydev); free_irq(dev->irq, dev); + + mdiobus_unregister(ax->mii_bus); + free_mdio_bitbang(ax->mii_bus); return 0; } @@ -539,92 +634,8 @@ static const struct net_device_ops ax_netdev_ops = { #endif }; -static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - - if (level) - ax->reg_memr |= AX_MEMR_MDC; - else - ax->reg_memr &= 
~AX_MEMR_MDC; - - ei_outb(ax->reg_memr, ax->addr_memr); -} - -static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - - if (output) - ax->reg_memr &= ~AX_MEMR_MDIR; - else - ax->reg_memr |= AX_MEMR_MDIR; - - ei_outb(ax->reg_memr, ax->addr_memr); -} - -static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - - if (value) - ax->reg_memr |= AX_MEMR_MDO; - else - ax->reg_memr &= ~AX_MEMR_MDO; - - ei_outb(ax->reg_memr, ax->addr_memr); -} - -static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - int reg_memr = ei_inb(ax->addr_memr); - - return reg_memr & AX_MEMR_MDI ? 1 : 0; -} - -static const struct mdiobb_ops bb_ops = { - .owner = THIS_MODULE, - .set_mdc = ax_bb_mdc, - .set_mdio_dir = ax_bb_dir, - .set_mdio_data = ax_bb_set_data, - .get_mdio_data = ax_bb_get_data, -}; - /* setup code */ -static int ax_mii_init(struct net_device *dev) -{ - struct platform_device *pdev = to_platform_device(dev->dev.parent); - struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); - int err; - - ax->bb_ctrl.ops = &bb_ops; - ax->addr_memr = ei_local->mem + AX_MEMR; - ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl); - if (!ax->mii_bus) { - err = -ENOMEM; - goto out; - } - - ax->mii_bus->name = "ax88796_mii_bus"; - ax->mii_bus->parent = dev->dev.parent; - snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - err = mdiobus_register(ax->mii_bus); - if (err) - goto out_free_mdio_bitbang; - - return 0; - - out_free_mdio_bitbang: - free_mdio_bitbang(ax->mii_bus); - out: - return err; -} - static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) { void __iomem *ioaddr = ei_local->mem; @@ -755,10 +766,6 @@ static int ax_init_dev(struct net_device *dev) dev->netdev_ops = &ax_netdev_ops; dev->ethtool_ops = &ax_ethtool_ops; - ret = ax_mii_init(dev); - if (ret) - goto err_out; - ax_NS8390_init(dev, 0); ret = register_netdev(dev); -- cgit v1.2.3 From 9144c3795c2636351d553e4d0fc5297201182de2 Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Thu, 19 Apr 2018 14:05:21 +1200 Subject: net-next: ax88796: Do not free IRQ in ax_remove() (already freed in ax_close()). This complements the fix in 82533ad9a1c ("net: ethernet: ax88796: don't call free_irq without request_irq first") that removed the free_irq call in the error path of probe, to also not call free_irq when remove is called to revert the effects of probe. Fixes: 82533ad9a1c (net: ethernet: ax88796: don't call free_irq without request_irq first) Signed-off-by: Michael Karcher Signed-off-by: Michael Schmitz Reviewed-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller --- drivers/net/ethernet/8390/ax88796.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index ab020e601ec9..d3f30f1c40b2 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -790,7 +790,6 @@ static int ax_remove(struct platform_device *pdev) struct resource *mem; unregister_netdev(dev); - free_irq(dev->irq, dev); iounmap(ei_local->mem); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- cgit v1.2.3 From 27cced20192d25ae528db8fe694c95c7656f3d56 Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Thu, 19 Apr 2018 14:05:22 +1200 Subject: net-next: ax88796: Add block_input/output hooks to ax_plat_data Add platform specific hooks for block transfer reads/writes of packet buffer data, superseding the default provided ax_block_input/output. Currently used for m68k Amiga XSurf100. Signed-off-by: Michael Karcher Signed-off-by: Michael Schmitz Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/ax88796.c | 10 ++++++++-- include/net/ax88796.h | 9 +++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index d3f30f1c40b2..939a572a19ff 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -758,8 +758,14 @@ static int ax_init_dev(struct net_device *dev) #endif ei_local->reset_8390 = &ax_reset_8390; - ei_local->block_input = &ax_block_input; - ei_local->block_output = &ax_block_output; + if (ax->plat->block_input) + ei_local->block_input = ax->plat->block_input; + else + ei_local->block_input = &ax_block_input; + if (ax->plat->block_output) + ei_local->block_output = ax->plat->block_output; + else + ei_local->block_output = &ax_block_output; ei_local->get_8390_hdr = &ax_get_8390_hdr; ei_local->priv = 0; diff --git a/include/net/ax88796.h b/include/net/ax88796.h index b9a3beca0ce4..363b0ca5f7e8 100644 --- a/include/net/ax88796.h +++ b/include/net/ax88796.h @@ -12,6 +12,9 @@ #ifndef __NET_AX88796_PLAT_H #define __NET_AX88796_PLAT_H +struct sk_buff; +struct net_device; + #define AXFLG_HAS_EEPROM (1<<0) #define AXFLG_MAC_FROMDEV (1<<1) /* device already has MAC */ #define AXFLG_HAS_93CX6 (1<<2) /* use eeprom_93cx6 driver */ @@ -26,6 +29,12 @@ struct ax_plat_data { u32 *reg_offsets; /* register offsets */ u8 *mac_addr; /* MAC addr (only used when AXFLG_MAC_FROMPLATFORM is used */ + + /* uses default ax88796 buffer if set to NULL */ + void (*block_output)(struct net_device *dev, int count, + const unsigned char *buf, int star_page); + void (*block_input)(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); }; #endif /* __NET_AX88796_PLAT_H */ -- cgit v1.2.3 From cec4c1c54a643608c262bd9bb72cf9bbec64f44a Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Thu, 19 Apr 2018 14:05:23 +1200 Subject: net-next: ax88796: add interrupt status callback to platform data To be able to tell the ax88796 driver whether it is sensible to enter the 8390 interrupt handler, an "is this interrupt caused by the 88796" callback has been added to the ax_plat_data structure (with NULL being compatible to the previous behaviour). Signed-off-by: Michael Karcher Signed-off-by: Michael Schmitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/8390/ax88796.c | 23 +++++++++++++++++++++-- include/net/ax88796.h | 5 +++++ 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 939a572a19ff..d283ed092519 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -163,6 +163,21 @@ static void ax_reset_8390(struct net_device *dev) ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */ } +/* Wrapper for __ei_interrupt for platforms that have a platform-specific + * way to find out whether the interrupt request might be caused by + * the ax88796 chip. + */ +static irqreturn_t ax_ei_interrupt_filtered(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct ax_device *ax = to_ax_dev(dev); + struct platform_device *pdev = to_platform_device(dev->dev.parent); + + if (!ax->plat->check_irq(pdev)) + return IRQ_NONE; + + return ax_ei_interrupt(irq, dev_id); +} static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) @@ -482,8 +497,12 @@ static int ax_open(struct net_device *dev) if (ret) goto failed_mii; - ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, - dev->name, dev); + if (ax->plat->check_irq) + ret = request_irq(dev->irq, ax_ei_interrupt_filtered, + ax->irqflags, dev->name, dev); + else + ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, + dev->name, dev); if (ret) goto failed_request_irq; diff --git a/include/net/ax88796.h b/include/net/ax88796.h index 363b0ca5f7e8..84b3785d0e66 100644 --- a/include/net/ax88796.h +++ b/include/net/ax88796.h @@ -14,6 +14,7 @@ struct sk_buff; struct net_device; +struct platform_device; #define AXFLG_HAS_EEPROM (1<<0) #define AXFLG_MAC_FROMDEV (1<<1) /* device already has MAC */ @@ -35,6 +36,10 @@ struct ax_plat_data { const unsigned char *buf, int star_page); void (*block_input)(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); + /* returns nonzero if a pending interrupt request might by caused by + * the ax88786. Handles all interrupts if set to NULL + */ + int (*check_irq)(struct platform_device *pdev); }; #endif /* __NET_AX88796_PLAT_H */ -- cgit v1.2.3 From caaf45a6ba064103fabc2cad46bb5caeec41859f Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Thu, 19 Apr 2018 14:05:24 +1200 Subject: net-next: ax88796: set IRQF_SHARED flag when IRQ resource is marked as shareable On the Amiga X-Surf100, the network card interrupt is shared with many other interrupt sources, so requires the IRQF_SHARED flag to register. Signed-off-by: Michael Karcher Signed-off-by: Michael Schmitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/8390/ax88796.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index d283ed092519..229279f325a2 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -872,6 +872,9 @@ static int ax_probe(struct platform_device *pdev) dev->irq = irq->start; ax->irqflags = irq->flags & IRQF_TRIGGER_MASK; + if (irq->flags & IORESOURCE_IRQ_SHAREABLE) + ax->irqflags |= IRQF_SHARED; + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no MEM specified\n"); -- cgit v1.2.3 From 453da988785fab2658b88ab57b992cacc9dd3b7d Mon Sep 17 00:00:00 2001 From: Michael Schmitz Date: Thu, 19 Apr 2018 14:05:25 +1200 Subject: net-next: ax88796: release platform device drvdata on probe error and module remove The net device struct pointer is stored as platform device drvdata on module probe - clear the drvdata entry on probe fail there, as well as when unloading the module. Signed-off-by: Michael Schmitz Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/ax88796.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 229279f325a2..2a0ddec1dd56 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -826,6 +826,7 @@ static int ax_remove(struct platform_device *pdev) release_mem_region(mem->start, resource_size(mem)); } + platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; @@ -959,6 +960,7 @@ static int ax_probe(struct platform_device *pdev) release_mem_region(mem->start, mem_size); exit_mem: + platform_set_drvdata(pdev, NULL); free_netdev(dev); return ret; -- cgit v1.2.3 From 861928f4e60e826cd8871c0c37f4b3d825b8d81d Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Thu, 19 Apr 2018 14:05:26 +1200 Subject: net-next: New ax88796 platform driver for Amiga X-Surf 100 Zorro board (m68k) Add platform device driver to populate the ax88796 platform data from information provided by the XSurf100 zorro device driver. The ax88796 module will be loaded through this module's probe function. Signed-off-by: Michael Karcher Signed-off-by: Michael Schmitz Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/Kconfig | 17 +- drivers/net/ethernet/8390/Makefile | 1 + drivers/net/ethernet/8390/xsurf100.c | 382 +++++++++++++++++++++++++++++++++++ 3 files changed, 398 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/8390/xsurf100.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 9fee7c83ef9f..f2f0264c58ba 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -29,8 +29,8 @@ config PCMCIA_AXNET called axnet_cs. If unsure, say N. config AX88796 - tristate "ASIX AX88796 NE2000 clone support" - depends on (ARM || MIPS || SUPERH) + tristate "ASIX AX88796 NE2000 clone support" if !ZORRO + depends on (ARM || MIPS || SUPERH || ZORRO || COMPILE_TEST) select CRC32 select PHYLIB select MDIO_BITBANG @@ -45,6 +45,19 @@ config AX88796_93CX6 ---help--- Select this if your platform comes with an external 93CX6 eeprom. +config XSURF100 + tristate "Amiga XSurf 100 AX88796/NE2000 clone support" + depends on ZORRO + select AX88796 + select ASIX_PHY + help + This driver is for the Individual Computers X-Surf 100 Ethernet + card (based on the Asix AX88796 chip). 
If you have such a card, + say Y. Otherwise, say N. + + To compile this driver as a module, choose M here: the module + will be called xsurf100. + config HYDRA tristate "Hydra support" depends on ZORRO diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile index 1d650e66cc6e..85c83c566ec6 100644 --- a/drivers/net/ethernet/8390/Makefile +++ b/drivers/net/ethernet/8390/Makefile @@ -16,4 +16,5 @@ obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o obj-$(CONFIG_STNIC) += stnic.o 8390.o obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o obj-$(CONFIG_WD80x3) += wd.o 8390.o +obj-$(CONFIG_XSURF100) += xsurf100.o obj-$(CONFIG_ZORRO8390) += zorro8390.o diff --git a/drivers/net/ethernet/8390/xsurf100.c b/drivers/net/ethernet/8390/xsurf100.c new file mode 100644 index 000000000000..e2c963821ffe --- /dev/null +++ b/drivers/net/ethernet/8390/xsurf100.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100 \ + ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x64, 0) + +#define XS100_IRQSTATUS_BASE 0x40 +#define XS100_8390_BASE 0x800 + +/* Longword-access area. Translated to 2 16-bit access cycles by the + * X-Surf 100 FPGA + */ +#define XS100_8390_DATA32_BASE 0x8000 +#define XS100_8390_DATA32_SIZE 0x2000 +/* Sub-Areas for fast data register access; addresses relative to area begin */ +#define XS100_8390_DATA_READ32_BASE 0x0880 +#define XS100_8390_DATA_WRITE32_BASE 0x0C80 +#define XS100_8390_DATA_AREA_SIZE 0x80 + +#define __NS8390_init ax_NS8390_init + +/* force unsigned long back to 'void __iomem *' */ +#define ax_convert_addr(_a) ((void __force __iomem *)(_a)) + +#define ei_inb(_a) z_readb(ax_convert_addr(_a)) +#define ei_outb(_v, _a) z_writeb(_v, ax_convert_addr(_a)) + +#define ei_inw(_a) z_readw(ax_convert_addr(_a)) +#define ei_outw(_v, _a) z_writew(_v, ax_convert_addr(_a)) + +#define ei_inb_p(_a) ei_inb(_a) +#define ei_outb_p(_v, _a) ei_outb(_v, _a) + +/* define EI_SHIFT() to take into account our register offsets */ +#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) + +/* Ensure we have our RCR base value */ +#define AX88796_PLATFORM + +static unsigned char version[] = + "ax88796.c: Copyright 2005,2007 Simtec Electronics\n"; + +#include "lib8390.c" + +/* from ne.c */ +#define NE_CMD EI_SHIFT(0x00) +#define NE_RESET EI_SHIFT(0x1f) +#define NE_DATAPORT EI_SHIFT(0x10) + +struct xsurf100_ax_plat_data { + struct ax_plat_data ax; + void __iomem *base_regs; + void __iomem *data_area; +}; + +static int is_xsurf100_network_irq(struct platform_device *pdev) +{ + struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev); + + return (readw(xs100->base_regs + XS100_IRQSTATUS_BASE) & 0xaaaa) != 0; +} + +/* These functions guarantee that the iomem is accessed with 32 bit + * cycles only. 
z_memcpy_fromio / z_memcpy_toio don't + */ +static void z_memcpy_fromio32(void *dst, const void __iomem *src, size_t bytes) +{ + while (bytes > 32) { + asm __volatile__ + ("movem.l (%0)+,%%d0-%%d7\n" + "movem.l %%d0-%%d7,(%1)\n" + "adda.l #32,%1" : "=a"(src), "=a"(dst) + : "0"(src), "1"(dst) : "d0", "d1", "d2", "d3", "d4", + "d5", "d6", "d7", "memory"); + bytes -= 32; + } + while (bytes) { + *(uint32_t *)dst = z_readl(src); + src += 4; + dst += 4; + bytes -= 4; + } +} + +static void z_memcpy_toio32(void __iomem *dst, const void *src, size_t bytes) +{ + while (bytes) { + z_writel(*(const uint32_t *)src, dst); + src += 4; + dst += 4; + bytes -= 4; + } +} + +static void xs100_write(struct net_device *dev, const void *src, + unsigned int count) +{ + struct ei_device *ei_local = netdev_priv(dev); + struct platform_device *pdev = to_platform_device(dev->dev.parent); + struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev); + + /* copy whole blocks */ + while (count > XS100_8390_DATA_AREA_SIZE) { + z_memcpy_toio32(xs100->data_area + + XS100_8390_DATA_WRITE32_BASE, src, + XS100_8390_DATA_AREA_SIZE); + src += XS100_8390_DATA_AREA_SIZE; + count -= XS100_8390_DATA_AREA_SIZE; + } + /* copy whole dwords */ + z_memcpy_toio32(xs100->data_area + XS100_8390_DATA_WRITE32_BASE, + src, count & ~3); + src += count & ~3; + if (count & 2) { + ei_outw(*(uint16_t *)src, ei_local->mem + NE_DATAPORT); + src += 2; + } + if (count & 1) + ei_outb(*(uint8_t *)src, ei_local->mem + NE_DATAPORT); +} + +static void xs100_read(struct net_device *dev, void *dst, unsigned int count) +{ + struct ei_device *ei_local = netdev_priv(dev); + struct platform_device *pdev = to_platform_device(dev->dev.parent); + struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev); + + /* copy whole blocks */ + while (count > XS100_8390_DATA_AREA_SIZE) { + z_memcpy_fromio32(dst, xs100->data_area + + XS100_8390_DATA_READ32_BASE, + XS100_8390_DATA_AREA_SIZE); + dst += XS100_8390_DATA_AREA_SIZE; + count -= XS100_8390_DATA_AREA_SIZE; + } + /* copy whole dwords */ + z_memcpy_fromio32(dst, xs100->data_area + XS100_8390_DATA_READ32_BASE, + count & ~3); + dst += count & ~3; + if (count & 2) { + *(uint16_t *)dst = ei_inw(ei_local->mem + NE_DATAPORT); + dst += 2; + } + if (count & 1) + *(uint8_t *)dst = ei_inb(ei_local->mem + NE_DATAPORT); +} + +/* Block input and output, similar to the Crynwr packet driver. If + * you are porting to a new ethercard, look at the packet driver + * source for hints. The NEx000 doesn't share the on-board packet + * memory -- you have to put the packet out through the "remote DMA" + * dataport using ei_outb. 
+ */ +static void xs100_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + char *buf = skb->data; + + if (ei_local->dmaing) { + netdev_err(dev, + "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + + ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); + ei_outb(count & 0xff, nic_base + EN0_RCNTLO); + ei_outb(count >> 8, nic_base + EN0_RCNTHI); + ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); + ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); + ei_outb(E8390_RREAD + E8390_START, nic_base + NE_CMD); + + xs100_read(dev, buf, count); + + ei_local->dmaing &= ~1; +} + +static void xs100_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + unsigned long dma_start; + + /* Round the count up for word writes. Do we need to do this? + * What effect will an odd byte count have on the 8390? I + * should check someday. + */ + if (ei_local->word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing + * you'll see + */ + if (ei_local->dmaing) { + netdev_err(dev, + "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, nic_base + NE_CMD); + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + ei_outb(count & 0xff, nic_base + EN0_RCNTLO); + ei_outb(count >> 8, nic_base + EN0_RCNTHI); + ei_outb(0x00, nic_base + EN0_RSARLO); + ei_outb(start_page, nic_base + EN0_RSARHI); + + ei_outb(E8390_RWRITE + E8390_START, nic_base + NE_CMD); + + xs100_write(dev, buf, count); + + dma_start = jiffies; + + while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { + if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */ + netdev_warn(dev, "timeout waiting for Tx RDC.\n"); + ei_local->reset_8390(dev); + ax_NS8390_init(dev, 1); + break; + } + } + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_local->dmaing &= ~0x01; +} + +static int xsurf100_probe(struct zorro_dev *zdev, + const struct zorro_device_id *ent) +{ + struct platform_device *pdev; + struct xsurf100_ax_plat_data ax88796_data; + struct resource res[2] = { + DEFINE_RES_NAMED(IRQ_AMIGA_PORTS, 1, NULL, + IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE), + DEFINE_RES_MEM(zdev->resource.start + XS100_8390_BASE, + 4 * 0x20) + }; + int reg; + /* This table is referenced in the device structure, so it must + * outlive the scope of xsurf100_probe. + */ + static u32 reg_offsets[32]; + int ret = 0; + + /* X-Surf 100 control and 32 bit ring buffer data access areas. + * These resources are not used by the ax88796 driver, so must + * be requested here and passed via platform data. 
+ */ + + if (!request_mem_region(zdev->resource.start, 0x100, zdev->name)) { + dev_err(&zdev->dev, "cannot reserve X-Surf 100 control registers\n"); + return -ENXIO; + } + + if (!request_mem_region(zdev->resource.start + + XS100_8390_DATA32_BASE, + XS100_8390_DATA32_SIZE, + "X-Surf 100 32-bit data access")) { + dev_err(&zdev->dev, "cannot reserve 32-bit area\n"); + ret = -ENXIO; + goto exit_req; + } + + for (reg = 0; reg < 0x20; reg++) + reg_offsets[reg] = 4 * reg; + + memset(&ax88796_data, 0, sizeof(ax88796_data)); + ax88796_data.ax.flags = AXFLG_HAS_EEPROM; + ax88796_data.ax.wordlength = 2; + ax88796_data.ax.dcr_val = 0x48; + ax88796_data.ax.rcr_val = 0x40; + ax88796_data.ax.reg_offsets = reg_offsets; + ax88796_data.ax.check_irq = is_xsurf100_network_irq; + ax88796_data.base_regs = ioremap(zdev->resource.start, 0x100); + + /* error handling for ioremap regs */ + if (!ax88796_data.base_regs) { + dev_err(&zdev->dev, "Cannot ioremap area %pR (registers)\n", + &zdev->resource); + + ret = -ENXIO; + goto exit_req2; + } + + ax88796_data.data_area = ioremap(zdev->resource.start + + XS100_8390_DATA32_BASE, XS100_8390_DATA32_SIZE); + + /* error handling for ioremap data */ + if (!ax88796_data.data_area) { + dev_err(&zdev->dev, + "Cannot ioremap area %pR offset %x (32-bit access)\n", + &zdev->resource, XS100_8390_DATA32_BASE); + + ret = -ENXIO; + goto exit_mem; + } + + ax88796_data.ax.block_output = xs100_block_output; + ax88796_data.ax.block_input = xs100_block_input; + + pdev = platform_device_register_resndata(&zdev->dev, "ax88796", + zdev->slotaddr, res, 2, + &ax88796_data, + sizeof(ax88796_data)); + + if (IS_ERR(pdev)) { + dev_err(&zdev->dev, "cannot register platform device\n"); + ret = -ENXIO; + goto exit_mem2; + } + + zorro_set_drvdata(zdev, pdev); + + if (!ret) + return 0; + + exit_mem2: + iounmap(ax88796_data.data_area); + + exit_mem: + iounmap(ax88796_data.base_regs); + + exit_req2: + release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE, + XS100_8390_DATA32_SIZE); + + exit_req: + release_mem_region(zdev->resource.start, 0x100); + + return ret; +} + +static void xsurf100_remove(struct zorro_dev *zdev) +{ + struct platform_device *pdev = zorro_get_drvdata(zdev); + struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev); + + platform_device_unregister(pdev); + + iounmap(xs100->base_regs); + release_mem_region(zdev->resource.start, 0x100); + iounmap(xs100->data_area); + release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE, + XS100_8390_DATA32_SIZE); +} + +static const struct zorro_device_id xsurf100_zorro_tbl[] = { + { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100, }, + { 0 } +}; + +MODULE_DEVICE_TABLE(zorro, xsurf100_zorro_tbl); + +static struct zorro_driver xsurf100_driver = { + .name = "xsurf100", + .id_table = xsurf100_zorro_tbl, + .probe = xsurf100_probe, + .remove = xsurf100_remove, +}; + +module_driver(xsurf100_driver, zorro_register_driver, zorro_unregister_driver); + +MODULE_DESCRIPTION("X-Surf 100 driver"); +MODULE_AUTHOR("Michael Karcher "); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 7d9d0d562b54d2953304693f76bc2bbfbe318c27 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 12 Apr 2018 16:15:07 +0300 Subject: iwlwifi: mvm: add traffic condition monitoring (TCM) Traffic condition monitor gathers data about the traffic load and other conditions and can be used to make decisions regarding latency, throughput etc. This patch introduces the code and data structures to collect this data for future use. 
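For illustration only, and not part of the patch itself: a minimal sketch of how the traffic load class could be derived from measured airtime, assuming iwl_mvm_tcm_load_percentage() returns (100 * airtime / elapsed) and using the threshold macros and enum values this patch adds. The helper name tcm_classify_load is hypothetical and does not appear in the patch.

/* Hypothetical helper, sketched for illustration only. The enum values
 * and threshold macros below are the ones introduced by this patch; the
 * function itself is not part of it.
 */
static enum iwl_mvm_traffic_load
tcm_classify_load(u32 airtime, u32 elapsed)
{
	/* assumed to be the airtime share of the elapsed period, in percent */
	u8 pct = iwl_mvm_tcm_load_percentage(airtime, elapsed);

	if (pct >= IWL_MVM_TCM_LOAD_HIGH_THRESH)	/* 50% */
		return IWL_MVM_TRAFFIC_HIGH;
	if (pct >= IWL_MVM_TCM_LOAD_MEDIUM_THRESH)	/* 10% */
		return IWL_MVM_TRAFFIC_MEDIUM;
	return IWL_MVM_TRAFFIC_LOW;
}
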
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 3 + drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 7 + .../net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 2 + drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 59 ++++++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 10 + drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 60 +++++- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 6 + drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 11 +- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 35 ++++ drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 231 +++++++++++++++++++++ 10 files changed, 415 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 96b52a275ee3..2763fc69f04b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -112,6 +112,9 @@ #define IWL_MVM_PARSE_NVM 0 #define IWL_MVM_ADWELL_ENABLE 1 #define IWL_MVM_ADWELL_MAX_BUDGET 0 +#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */ +#define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */ +#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */ #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 2efe9b099556..80a9a7cb83be 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1097,6 +1097,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) /* make sure the d0i3 exit work is not pending */ flush_work(&mvm->d0i3_exit_work); + iwl_mvm_pause_tcm(mvm, true); iwl_fw_runtime_suspend(&mvm->fwrt); @@ -2014,6 +2015,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw) mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + iwl_mvm_resume_tcm(mvm); + iwl_fw_runtime_resume(&mvm->fwrt); return ret; @@ -2042,6 +2045,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + iwl_mvm_pause_tcm(mvm, true); + iwl_fw_runtime_suspend(&mvm->fwrt); /* start pseudo D3 */ @@ -2104,6 +2109,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) __iwl_mvm_resume(mvm, true); rtnl_unlock(); + iwl_mvm_resume_tcm(mvm); + iwl_fw_runtime_resume(&mvm->fwrt); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index f7fcf700196b..798605c4f122 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -269,6 +269,8 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file, mvmvif->id, mvmvif->color); pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n", vif->bss_conf.bssid); + pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n", + mvm->tcm.result.load[mvmvif->id]); pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n"); for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) pos += scnprintf(buf+pos, bufsz-pos, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index d2cf751db68d..72bab44082ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -92,6 +92,8 @@ #include "fw/acpi.h" 
#include "fw/debugfs.h" +#include + #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ #define IWL_RSSI_OFFSET 50 @@ -595,6 +597,51 @@ enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_ACTIVE, }; +enum iwl_mvm_traffic_load { + IWL_MVM_TRAFFIC_LOW, + IWL_MVM_TRAFFIC_MEDIUM, + IWL_MVM_TRAFFIC_HIGH, +}; + +DECLARE_EWMA(rate, 16, 16) + +struct iwl_mvm_tcm_mac { + struct { + u32 pkts[IEEE80211_NUM_ACS]; + u32 airtime; + } tx; + struct { + u32 pkts[IEEE80211_NUM_ACS]; + u32 airtime; + u32 last_ampdu_ref; + } rx; + struct { + /* track AP's transfer in client mode */ + u64 rx_bytes; + struct ewma_rate rate; + bool detected; + } uapsd_nonagg_detect; +}; + +struct iwl_mvm_tcm { + struct delayed_work work; + spinlock_t lock; /* used when time elapsed */ + unsigned long ts; /* timestamp when period ends */ + unsigned long ll_ts; + unsigned long uapsd_nonagg_ts; + bool paused; + struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER]; + struct { + u32 elapsed; /* milliseconds for this TCM period */ + u32 airtime[NUM_MAC_INDEX_DRIVER]; + enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER]; + enum iwl_mvm_traffic_load global_load; + bool low_latency[NUM_MAC_INDEX_DRIVER]; + bool change[NUM_MAC_INDEX_DRIVER]; + bool global_change; + } result; +}; + /** * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn @@ -978,6 +1025,9 @@ struct iwl_mvm { */ bool temperature_test; /* Debug test temperature is enabled */ + unsigned long bt_coex_last_tcm_ts; + struct iwl_mvm_tcm tcm; + struct iwl_time_quota_cmd last_quota_cmd; #ifdef CONFIG_NL80211_TESTMODE @@ -1906,6 +1956,15 @@ bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); void iwl_mvm_inactivity_check(struct iwl_mvm *mvm); +#define MVM_TCM_PERIOD_MSEC 500 +#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000) +#define MVM_LL_PERIOD (10 * HZ) +void iwl_mvm_tcm_work(struct work_struct *work); +void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm); +void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel); +void iwl_mvm_resume_tcm(struct iwl_mvm *mvm); +u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed); + void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 224bfa1bcf53..6a3f557543e4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -667,6 +667,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); + spin_lock_init(&mvm->tcm.lock); + INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work); + mvm->tcm.ts = jiffies; + mvm->tcm.ll_ts = jiffies; + mvm->tcm.uapsd_nonagg_ts = jiffies; + INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); /* @@ -859,6 +865,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); + cancel_delayed_work_sync(&mvm->tcm.work); + iwl_mvm_tof_clean(mvm); mutex_destroy(&mvm->mutex); @@ -1432,6 +1440,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) mvm->d0i3_offloading = false; } + iwl_mvm_pause_tcm(mvm, true); /* make sure we have no running tx while configuring the seqno */ synchronize_net(); @@ -1615,6 +1624,7 @@ out: /* the FW might have updated the regdomain */ iwl_mvm_update_changed_regdom(mvm); + iwl_mvm_resume_tcm(mvm); iwl_mvm_unref(mvm, 
IWL_MVM_REF_EXIT_WORK); mutex_unlock(&mvm->mutex); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index d26833c5ce1f..be80294349c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -254,6 +254,39 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, return 0; } +static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct ieee80211_hdr *hdr, u32 len, + struct iwl_rx_phy_info *phy_info, + u32 rate_n_flags) +{ + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_tcm_mac *mdata; + int mac; + int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */ + + if (ieee80211_is_data_qos(hdr->frame_control)) + ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)]; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; + + if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) + schedule_delayed_work(&mvm->tcm.work, 0); + mdata = &mvm->tcm.data[mac]; + mdata->rx.pkts[ac]++; + + /* count the airtime only once for each ampdu */ + if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) { + mdata->rx.last_ampdu_ref = mvm->ampdu_ref; + mdata->rx.airtime += le16_to_cpu(phy_info->frame_time); + } + + if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK))) + return; + +} + static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, struct sk_buff *skb, u32 status) @@ -408,6 +441,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, NULL); } + if (!mvm->tcm.paused && len >= sizeof(*hdr) && + !is_multicast_ether_addr(hdr->addr1) && + ieee80211_is_data(hdr->frame_control)) + iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info, + rate_n_flags); + if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, rx_pkt_status); } @@ -654,7 +693,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, int expected_size; int i; u8 *energy; - __le32 *bytes, *air_time; + __le32 *air_time; __le32 flags; if (!iwl_mvm_has_new_rx_stats_api(mvm)) { @@ -729,13 +768,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data; energy = (void *)&v11->load_stats.avg_energy; - bytes = (void *)&v11->load_stats.byte_count; air_time = (void *)&v11->load_stats.air_time; } else { struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data; energy = (void *)&stats->load_stats.avg_energy; - bytes = (void *)&stats->load_stats.byte_count; air_time = (void *)&stats->load_stats.air_time; } @@ -752,6 +789,23 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, sta->avg_energy = energy[i]; } rcu_read_unlock(); + + /* + * Don't update in case the statistics are not cleared, since + * we will end up counting twice the same airtime, once in TCM + * request and once in statistics notification. 
+ */ + if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)) + return; + + spin_lock(&mvm->tcm.lock); + for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i]; + u32 airtime = le32_to_cpu(air_time[i]); + + mdata->rx.airtime += airtime; + } + spin_unlock(&mvm->tcm.lock); } void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 4a4ccfd11e5b..f9cd7575b422 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -941,6 +941,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT); + if (!mvm->tcm.paused && len >= sizeof(*hdr) && + !is_multicast_ether_addr(hdr->addr1) && + ieee80211_is_data(hdr->frame_control) && + time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) + schedule_delayed_work(&mvm->tcm.work, 0); + /* * We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index b31f0ffbbbf0..cd7ae6976935 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -76,12 +76,6 @@ #define IWL_DENSE_EBS_SCAN_RATIO 5 #define IWL_SPARSE_EBS_SCAN_RATIO 1 -enum iwl_mvm_traffic_load { - IWL_MVM_TRAFFIC_LOW, - IWL_MVM_TRAFFIC_MEDIUM, - IWL_MVM_TRAFFIC_HIGH, -}; - #define IWL_SCAN_DWELL_ACTIVE 10 #define IWL_SCAN_DWELL_PASSIVE 110 #define IWL_SCAN_DWELL_FRAGMENTED 44 @@ -437,6 +431,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, ieee80211_scan_completed(mvm->hw, &info); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); cancel_delayed_work(&mvm->scan_timeout_dwork); + iwl_mvm_resume_tcm(mvm); } else { IWL_ERR(mvm, "got scan complete notification but no scan is running\n"); @@ -1568,6 +1563,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) return ret; + iwl_mvm_pause_tcm(mvm, false); + ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { /* If the scan failed, it usually means that the FW was unable @@ -1575,6 +1572,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * should try to send the command again with different params. */ IWL_ERR(mvm, "Scan failed! 
ret %d\n", ret); + iwl_mvm_resume_tcm(mvm); return ret; } @@ -1711,6 +1709,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, mvm->scan_vif = NULL; iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); cancel_delayed_work(&mvm->scan_timeout_dwork); + iwl_mvm_resume_tcm(mvm); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 795065974d78..f06a0ee7be28 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -930,6 +930,32 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) return false; } +static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, + int airtime) +{ + int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + + if (mvm->tcm.paused) + return; + + if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) + schedule_delayed_work(&mvm->tcm.work, 0); + + mdata->tx.airtime += airtime; +} + +static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, int tid) +{ + u32 ac = tid_to_mac80211_ac[tid]; + int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + + mdata->tx.pkts[ac]++; +} + /* * Sets the fields in the Tx cmd that are crypto related */ @@ -1067,6 +1093,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); + iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid); + return 0; drop_unlock_sta: @@ -1469,6 +1497,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, if (!IS_ERR(sta)) { mvmsta = iwl_mvm_sta_from_mac80211(sta); + iwl_mvm_tx_airtime(mvm, mvmsta, + le16_to_cpu(tx_resp->wireless_media_time)); + if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; @@ -1610,6 +1641,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, le16_to_cpu(tx_resp->wireless_media_time); mvmsta->tid_data[tid].lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); + iwl_mvm_tx_airtime(mvm, mvmsta, + le16_to_cpu(tx_resp->wireless_media_time)); } rcu_read_unlock(); @@ -1800,6 +1833,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) le32_to_cpu(ba_res->tx_rate)); } + iwl_mvm_tx_airtime(mvm, mvmsta, + le32_to_cpu(ba_res->wireless_time)); out_unlock: rcu_read_unlock(); out: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index d99d9ea78e4c..27905f3fe3ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1429,6 +1429,237 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, sta->addr, tid); } +u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed) +{ + if (!elapsed) + return 0; + + return (100 * airtime / elapsed) / USEC_PER_MSEC; +} + +static enum iwl_mvm_traffic_load +iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed) +{ + u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed); + + if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH) + return IWL_MVM_TRAFFIC_HIGH; + if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH) + return IWL_MVM_TRAFFIC_MEDIUM; + + return IWL_MVM_TRAFFIC_LOW; +} + +struct iwl_mvm_tcm_iter_data { + struct iwl_mvm *mvm; + bool any_sent; +}; + 
+static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) +{ + struct iwl_mvm_tcm_iter_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC; + + if (mvmvif->id >= NUM_MAC_INDEX_DRIVER) + return; + + low_latency = mvm->tcm.result.low_latency[mvmvif->id]; + + if (!mvm->tcm.result.change[mvmvif->id] && + prev == low_latency) { + iwl_mvm_update_quotas(mvm, false, NULL); + return; + } + + if (prev != low_latency) { + /* this sends traffic load and updates quota as well */ + iwl_mvm_update_low_latency(mvm, vif, low_latency, + LOW_LATENCY_TRAFFIC); + } else { + iwl_mvm_update_quotas(mvm, false, NULL); + } + + data->any_sent = true; +} + +static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tcm_iter_data data = { + .mvm = mvm, + .any_sent = false, + }; + + mutex_lock(&mvm->mutex); + + ieee80211_iterate_active_interfaces( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tcm_iter, &data); + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_config_scan(mvm); + + mutex_unlock(&mvm->mutex); +} + + +static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, + unsigned long ts, + bool handle_uapsd) +{ + unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts); + u32 total_airtime = 0; + int ac, mac; + bool low_latency = false; + enum iwl_mvm_traffic_load load; + bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD); + + if (handle_ll) + mvm->tcm.ll_ts = ts; + if (handle_uapsd) + mvm->tcm.uapsd_nonagg_ts = ts; + + mvm->tcm.result.elapsed = elapsed; + + for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + u32 vo_vi_pkts = 0; + u32 airtime = mdata->rx.airtime + mdata->tx.airtime; + + total_airtime += airtime; + + load = iwl_mvm_tcm_load(mvm, airtime, elapsed); + mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac]; + mvm->tcm.result.load[mac] = load; + mvm->tcm.result.airtime[mac] = airtime; + + for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++) + vo_vi_pkts += mdata->rx.pkts[ac] + + mdata->tx.pkts[ac]; + + /* enable immediately with enough packets but defer disabling */ + if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH) + mvm->tcm.result.low_latency[mac] = true; + else if (handle_ll) + mvm->tcm.result.low_latency[mac] = false; + + if (handle_ll) { + /* clear old data */ + memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts)); + memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); + } + low_latency |= mvm->tcm.result.low_latency[mac]; + + memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); + memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); + } + + load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed); + mvm->tcm.result.global_change = load != mvm->tcm.result.global_load; + mvm->tcm.result.global_load = load; + + /* + * If the current load isn't low we need to force re-evaluation + * in the TCM period, so that we can return to low load if there + * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get + * triggered by traffic). + */ + if (load != IWL_MVM_TRAFFIC_LOW) + return MVM_TCM_PERIOD; + /* + * If low-latency is active we need to force re-evaluation after + * (the longer) MVM_LL_PERIOD, so that we can disable low-latency + * when there's no traffic at all. 
+ */ + if (low_latency) + return MVM_LL_PERIOD; + /* + * Otherwise, we don't need to run the work struct because we're + * in the default "idle" state - traffic indication is low (which + * also covers the "no traffic" case) and low-latency is disabled + * so there's no state that may need to be disabled when there's + * no traffic at all. + * + * Note that this has no impact on the regular scheduling of the + * updates triggered by traffic - those happen whenever one of the + * two timeouts expire (if there's traffic at all.) + */ + return 0; +} + +void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm) +{ + unsigned long ts = jiffies; + bool handle_uapsd = + false; + + spin_lock(&mvm->tcm.lock); + if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { + spin_unlock(&mvm->tcm.lock); + return; + } + spin_unlock(&mvm->tcm.lock); + + + spin_lock(&mvm->tcm.lock); + /* re-check if somebody else won the recheck race */ + if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { + /* calculate statistics */ + unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts, + handle_uapsd); + + /* the memset needs to be visible before the timestamp */ + smp_mb(); + mvm->tcm.ts = ts; + if (work_delay) + schedule_delayed_work(&mvm->tcm.work, work_delay); + } + spin_unlock(&mvm->tcm.lock); + + iwl_mvm_tcm_results(mvm); +} + +void iwl_mvm_tcm_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm, + tcm.work); + + iwl_mvm_recalc_tcm(mvm); +} + +void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel) +{ + spin_lock_bh(&mvm->tcm.lock); + mvm->tcm.paused = true; + spin_unlock_bh(&mvm->tcm.lock); + if (with_cancel) + cancel_delayed_work_sync(&mvm->tcm.work); +} + +void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) +{ + int mac; + + spin_lock_bh(&mvm->tcm.lock); + mvm->tcm.ts = jiffies; + mvm->tcm.ll_ts = jiffies; + for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + + memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts)); + memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); + memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); + memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); + } + /* The TCM data needs to be reset before "paused" flag changes */ + smp_mb(); + mvm->tcm.paused = false; + spin_unlock_bh(&mvm->tcm.lock); +} + + void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) { bool ps_disabled; -- cgit v1.2.3 From 85207c66a2f091c005b98fc4976c8daf297b56e7 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 13 Apr 2018 08:58:35 +0300 Subject: iwlwifi: mvm: use TCM data to decide scan priority The code for changing the scan priority is already implemented, but isn't yet in use. Now that TCM data is available, we can base the scan priority decision on the traffic load. 
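As a rough worked example of how the numbers flow into this decision (the airtime and period values are invented for illustration; the helpers and constants are from the TCM code added earlier in this series):

    u32 airtime = 300 * USEC_PER_MSEC;  /* assumed: 300 ms of combined RX+TX airtime */
    u32 elapsed = 500;                  /* one MVM_TCM_PERIOD_MSEC window, in msecs */
    u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
    /* (100 * 300000 / 500) / USEC_PER_MSEC == 60 */

    /* 60 is above IWL_MVM_TCM_LOAD_HIGH_THRESH (50), so the period is
     * classified as IWL_MVM_TRAFFIC_HIGH; iwl_mvm_get_traffic_load() now
     * returns that global load, and the scan code prefers
     * IWL_SCAN_TYPE_FRAGMENTED when the firmware advertises
     * IWL_UCODE_TLV_API_FRAGMENTED_SCAN.
     */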
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index cd7ae6976935..8f3185b9d365 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -228,7 +228,7 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) { - return IWL_MVM_TRAFFIC_LOW; + return mvm->tcm.result.global_load; } static enum -- cgit v1.2.3 From bde1492d4a15ae37e1388a6f5a7972afb7ca32e3 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 9 Dec 2014 17:08:41 +0200 Subject: iwlwifi: mvm: BT Coex - make the primary / secondary pick traffic aware The primary channel is the channel that will be untouched by BT. The secondary channel might be touched by BT. Hence, we want the primary to be the most active channel. To do so, use the TCM infrastructure. Since the BT keeps sending notifications, we can rely on them to trigger the check. Every 10 seconds, we will check what is the most active context and chose the right primary. We need to wait 10 seconds before we modify the settings because frequent changes in these settings can confuse BT. Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 37 +++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 890dbfff3a06..016e03a5034f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -279,6 +279,8 @@ struct iwl_bt_iterator_data { struct ieee80211_chanctx_conf *primary; struct ieee80211_chanctx_conf *secondary; bool primary_ll; + u8 primary_load; + u8 secondary_load; }; static inline @@ -295,6 +297,30 @@ void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, enable ? 
-IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; } +#define MVM_COEX_TCM_PERIOD (HZ * 10) + +static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm, + struct iwl_bt_iterator_data *data) +{ + unsigned long now = jiffies; + + if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD)) + return; + + mvm->bt_coex_last_tcm_ts = now; + + /* We assume here that we don't have more than 2 vifs on 2.4GHz */ + + /* if the primary is low latency, it will stay primary */ + if (data->primary_ll) + return; + + if (data->primary_load >= data->secondary_load) + return; + + swap(data->primary, data->secondary); +} + /* must be called under rcu_read_lock */ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) @@ -385,6 +411,11 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, /* there is low latency vif - we will be secondary */ data->secondary = chanctx_conf; } + + if (data->primary == chanctx_conf) + data->primary_load = mvm->tcm.result.load[mvmvif->id]; + else if (data->secondary == chanctx_conf) + data->secondary_load = mvm->tcm.result.load[mvmvif->id]; return; } @@ -398,6 +429,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, /* if secondary is not NULL, it might be a GO */ data->secondary = chanctx_conf; + if (data->primary == chanctx_conf) + data->primary_load = mvm->tcm.result.load[mvmvif->id]; + else if (data->secondary == chanctx_conf) + data->secondary_load = mvm->tcm.result.load[mvmvif->id]; /* * don't reduce the Tx power if one of these is true: * we are in LOOSE @@ -449,6 +484,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bt_notif_iterator, &data); + iwl_mvm_bt_coex_tcm_based_ci(mvm, &data); + if (data.primary) { struct ieee80211_chanctx_conf *chan = data.primary; if (WARN_ON(!chan->def.chan)) { -- cgit v1.2.3 From b0ffe455bc5bbdbcf7837274d2476f5597767237 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 11 Nov 2014 12:57:03 +0100 Subject: iwlwifi: mvm: detect U-APSD breaking aggregation Try to detect that the AP is not using aggregation even when there's enough traffic to make it worthwhile; if this is the case and U-APSD is enabled then assume the AP is broken (like so many) and doesn't enable aggregation when U-APSD is used. In this case, disconnect from the AP and blacklist U-APSD for a potential new connection to it. 
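A rough worked example of the detection threshold (the rate and byte figures are invented; the constants and helpers are from this patch): with the new RX API, iwl_mvm_check_uapsd_agg_expected_tpt() compares the throughput measured over the last IWL_MVM_UAPSD_NONAGG_PERIOD (5000 msecs) against the EWMA of the expected PHY rate:

    expected  = ewma_rate_read(&mdata->uapsd_nonagg_detect.rate); /* e.g. 120, i.e. 120 Mbps */
    tpt       = 8 * rx_bytes / elapsed;     /* e.g. 8 * 20000000 bytes / 5000 msecs = 32000 kbps */
    threshold = 22 * (expected * 1000) / 100;             /* 22% of 120000 kbps = 26400 kbps */

    /* tpt is above the threshold and no RX BA session was opened
     * (opened_rx_ba_sessions is false), so iwl_mvm_uapsd_agg_disconnect_iter()
     * marks the MAC as detected and schedules uapsd_nonagg_detected_wk, which
     * records the BSSID in uapsd_noagg_bssids and triggers the disconnect
     * 15 seconds later.
     */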
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 4 + drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 25 ++++ drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 33 ++++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 9 ++ drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 47 ++++++++ drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 127 ++++++++++++++++++++- 6 files changed, 244 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 2763fc69f04b..d61ff66ce07b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -69,6 +69,8 @@ #include +#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20 + #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) @@ -115,6 +117,8 @@ #define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */ #define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */ #define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */ +#define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */ +#define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 0e6401cd7ccc..1c4178f20441 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1728,6 +1728,27 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } +static ssize_t +iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1]; + unsigned int pos = 0; + size_t bufsz = sizeof(buf); + int i; + + mutex_lock(&mvm->mutex); + + for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) + pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", + mvm->uapsd_noagg_bssids[i].addr); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ @@ -1762,6 +1783,8 @@ MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512); +MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); + #ifdef CONFIG_IWLWIFI_BCAST_FILTERING MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); @@ -1972,6 +1995,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) goto err; + MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR); + #ifdef CONFIG_IWLWIFI_BCAST_FILTERING if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { bcast_dir = debugfs_create_dir("bcast_filtering", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 51b30424575b..08318bdaaf2e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -952,6 +952,16 @@ 
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: + if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == + iwl_mvm_sta_from_mac80211(sta)->sta_id) { + struct iwl_mvm_vif *mvmvif; + u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; + + mdata->opened_rx_ba_sessions = true; + mvmvif = iwl_mvm_vif_from_mac80211(vif); + cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); + } if (!iwl_enable_rx_ampdu(mvm->cfg)) { ret = -EINVAL; break; @@ -1435,6 +1445,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mvm->p2p_device_vif = vif; } + iwl_mvm_tcm_add_vif(mvm, vif); + if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = true; @@ -1486,6 +1498,10 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_prepare_mac_removal(mvm, vif); + if (!(vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC)) + iwl_mvm_tcm_rm_vif(mvm, vif); + mutex_lock(&mvm->mutex); if (mvm->bf_allowed_vif == mvmvif) { @@ -2535,6 +2551,16 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const u8 *bssid) { + int i; + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + struct iwl_mvm_tcm_mac *mdata; + + mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; + ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); + mdata->opened_rx_ba_sessions = false; + } + if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) return; @@ -2549,6 +2575,13 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return; } + for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { + if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { + vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; + return; + } + } + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 72bab44082ea..acc36eb1ef39 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -446,6 +446,8 @@ struct iwl_mvm_vif { /* FW identified misbehaving AP */ u8 uapsd_misbehaving_bssid[ETH_ALEN]; + struct delayed_work uapsd_nonagg_detected_wk; + /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; @@ -621,6 +623,7 @@ struct iwl_mvm_tcm_mac { struct ewma_rate rate; bool detected; } uapsd_nonagg_detect; + bool opened_rx_ba_sessions; }; struct iwl_mvm_tcm { @@ -1028,6 +1031,10 @@ struct iwl_mvm { unsigned long bt_coex_last_tcm_ts; struct iwl_mvm_tcm tcm; + u8 uapsd_noagg_bssid_write_idx; + struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM] + __aligned(2); + struct iwl_time_quota_cmd last_quota_cmd; #ifdef CONFIG_NL80211_TESTMODE @@ -1963,6 +1970,8 @@ void iwl_mvm_tcm_work(struct work_struct *work); void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm); void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel); void iwl_mvm_resume_tcm(struct iwl_mvm *mvm); +void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index be80294349c3..bfb163419c67 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -264,6 +264,12 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, struct iwl_mvm_tcm_mac *mdata; int mac; int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */ + struct iwl_mvm_vif *mvmvif; + /* expected throughput in 100Kbps, single stream, 20 MHz */ + static const u8 thresh_tpt[] = { + 9, 18, 30, 42, 60, 78, 90, 96, 120, 135, + }; + u16 thr; if (ieee80211_is_data_qos(hdr->frame_control)) ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)]; @@ -285,6 +291,35 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK))) return; + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (mdata->opened_rx_ba_sessions || + mdata->uapsd_nonagg_detect.detected || + (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && + !mvmvif->queue_params[IEEE80211_AC_VI].uapsd && + !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && + !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) || + mvmsta->sta_id != mvmvif->ap_sta_id) + return; + + if (rate_n_flags & RATE_MCS_HT_MSK) { + thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK]; + thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >> + RATE_HT_MCS_NSS_POS); + } else { + if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >= + ARRAY_SIZE(thresh_tpt))) + return; + thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK]; + thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS); + } + + thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >> + RATE_MCS_CHAN_WIDTH_POS); + + mdata->uapsd_nonagg_detect.rx_bytes += len; + ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr); } static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, @@ -693,6 +728,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, int expected_size; int i; u8 *energy; + __le32 *bytes; __le32 *air_time; __le32 flags; @@ -768,11 +804,13 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data; energy = (void *)&v11->load_stats.avg_energy; + bytes = (void *)&v11->load_stats.byte_count; air_time = (void *)&v11->load_stats.air_time; } else { struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data; energy = (void *)&stats->load_stats.avg_energy; + bytes = (void *)&stats->load_stats.byte_count; air_time = (void *)&stats->load_stats.air_time; } @@ -802,6 +840,15 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i]; u32 airtime = le32_to_cpu(air_time[i]); + u32 rx_bytes = le32_to_cpu(bytes[i]); + + mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes; + if (airtime) { + /* re-init every time to store rate from FW */ + ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); + ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, + rx_bytes * 8 / airtime); + } mdata->rx.airtime += airtime; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 27905f3fe3ba..ea0332a2389e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1503,12 +1503,109 @@ static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) mutex_unlock(&mvm->mutex); } +static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm; + struct iwl_mvm_vif *mvmvif; + struct ieee80211_vif *vif; + + mvmvif = container_of(wk, struct iwl_mvm_vif, + uapsd_nonagg_detected_wk.work); + 
vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); + mvm = mvmvif->mvm; + + if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions) + return; + + /* remember that this AP is broken */ + memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr, + vif->bss_conf.bssid, ETH_ALEN); + mvm->uapsd_noagg_bssid_write_idx++; + if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN) + mvm->uapsd_noagg_bssid_write_idx = 0; + + iwl_mvm_connection_loss(mvm, vif, + "AP isn't using AMPDU with uAPSD enabled"); +} + +static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int *mac_id = data; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (mvmvif->id != *mac_id) + return; + + if (!vif->bss_conf.assoc) + return; + + if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && + !mvmvif->queue_params[IEEE80211_AC_VI].uapsd && + !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && + !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) + return; + + if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected) + return; + + mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true; + IWL_INFO(mvm, + "detected AP should do aggregation but isn't, likely due to U-APSD\n"); + schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ); +} + +static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, + unsigned int elapsed, + int mac) +{ + u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes; + u64 tpt; + unsigned long rate; + + rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate); + + if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions || + mvm->tcm.data[mac].uapsd_nonagg_detect.detected) + return; + + if (iwl_mvm_has_new_rx_api(mvm)) { + tpt = 8 * bytes; /* kbps */ + do_div(tpt, elapsed); + rate *= 1000; /* kbps */ + if (tpt < 22 * rate / 100) + return; + } else { + /* + * the rate here is actually the threshold, in 100Kbps units, + * so do the needed conversion from bytes to 100Kbps: + * 100kb = bits / (100 * 1000), + * 100kbps = 100kb / (msecs / 1000) == + * (bits / (100 * 1000)) / (msecs / 1000) == + * bits / (100 * msecs) + */ + tpt = (8 * bytes); + do_div(tpt, elapsed * 100); + if (tpt < rate) + return; + } + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_uapsd_agg_disconnect_iter, &mac); +} static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, unsigned long ts, bool handle_uapsd) { unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts); + unsigned int uapsd_elapsed = + jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts); u32 total_airtime = 0; int ac, mac; bool low_latency = false; @@ -1551,6 +1648,12 @@ static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, } low_latency |= mvm->tcm.result.low_latency[mac]; + if (!mvm->tcm.result.low_latency[mac] && handle_uapsd) + iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed, + mac); + /* clear old data */ + if (handle_uapsd) + mdata->uapsd_nonagg_detect.rx_bytes = 0; memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); } @@ -1592,7 +1695,8 @@ void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm) { unsigned long ts = jiffies; bool handle_uapsd = - false; + time_after(ts, mvm->tcm.uapsd_nonagg_ts + + msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD)); spin_lock(&mvm->tcm.lock); if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + 
MVM_TCM_PERIOD)) { @@ -1601,6 +1705,12 @@ void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm) } spin_unlock(&mvm->tcm.lock); + if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) { + mutex_lock(&mvm->mutex); + if (iwl_mvm_request_statistics(mvm, true)) + handle_uapsd = false; + mutex_unlock(&mvm->mutex); + } spin_lock(&mvm->tcm.lock); /* re-check if somebody else won the recheck race */ @@ -1659,6 +1769,21 @@ void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) spin_unlock_bh(&mvm->tcm.lock); } +void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk, + iwl_mvm_tcm_uapsd_nonagg_detected_wk); +} + +void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk); +} + void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) { -- cgit v1.2.3 From b66b5817a0ce0e5653f4f60ab583c19cb5dab546 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 31 Jan 2017 10:39:44 +0200 Subject: iwlwifi: mvm: detect low latency and traffic load per band Detect low latency and traffic load per band. Add support for deciding on scan type and timings per band. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 1 + drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 8 ++ drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 167 ++++++++++++++++++------- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 66 ++++++++-- 4 files changed, 192 insertions(+), 50 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 3c59109bea20..4cf3e32e3dba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1093,6 +1093,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { mvm->scan_type = IWL_SCAN_TYPE_NOT_SET; + mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET; ret = iwl_mvm_config_scan(mvm); if (ret) goto error; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index acc36eb1ef39..2150b785387f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -638,6 +640,7 @@ struct iwl_mvm_tcm { u32 elapsed; /* milliseconds for this TCM period */ u32 airtime[NUM_MAC_INDEX_DRIVER]; enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER]; + enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS]; enum iwl_mvm_traffic_load global_load; bool low_latency[NUM_MAC_INDEX_DRIVER]; bool change[NUM_MAC_INDEX_DRIVER]; @@ -879,7 +882,10 @@ struct iwl_mvm { unsigned int scan_status; void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; + /* For CDB this is low band scan type, for non-CDB - type. */ enum iwl_mvm_scan_type scan_type; + enum iwl_mvm_scan_type hb_scan_type; + enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; struct delayed_work scan_timeout_dwork; @@ -1828,6 +1834,8 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_low_latency_cause cause); /* get SystemLowLatencyMode - only needed for beacon threshold? */ bool iwl_mvm_low_latency(struct iwl_mvm *mvm); +bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band); + /* get VMACLowLatencyMode */ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 8f3185b9d365..8fcd92dcaaa7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,9 +20,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -117,7 +116,9 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = { }; struct iwl_mvm_scan_params { + /* For CDB this is low band scan type, for non-CDB - type. 
*/ enum iwl_mvm_scan_type type; + enum iwl_mvm_scan_type hb_type; u32 n_channels; u16 delay; int n_ssids; @@ -231,12 +232,18 @@ static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) return mvm->tcm.result.global_load; } +static enum iwl_mvm_traffic_load +iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band) +{ + return mvm->tcm.result.band_load[band]; +} + static enum -iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) +iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device, + enum iwl_mvm_traffic_load load, + bool low_latency) { int global_cnt = 0; - enum iwl_mvm_traffic_load load; - bool low_latency; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, @@ -245,9 +252,6 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) if (!global_cnt) return IWL_SCAN_TYPE_UNASSOC; - load = iwl_mvm_get_traffic_load(mvm); - low_latency = iwl_mvm_low_latency(mvm); - if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) return IWL_SCAN_TYPE_FRAGMENTED; @@ -258,25 +262,57 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) return IWL_SCAN_TYPE_WILD; } +static enum +iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) +{ + enum iwl_mvm_traffic_load load; + bool low_latency; + + load = iwl_mvm_get_traffic_load(mvm); + low_latency = iwl_mvm_low_latency(mvm); + + return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); +} + +static enum +iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, + bool p2p_device, + enum nl80211_band band) +{ + enum iwl_mvm_traffic_load load; + bool low_latency; + + load = iwl_mvm_get_traffic_load_band(mvm, band); + low_latency = iwl_mvm_low_latency_band(mvm, band); + + return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); +} + static int iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm, struct cfg80211_scan_request *req, struct iwl_mvm_scan_params *params) { + u32 duration = scan_timing[params->type].max_out_time; + if (!req->duration) return 0; - if (req->duration_mandatory && - req->duration > scan_timing[params->type].max_out_time) { + if (iwl_mvm_is_cdb_supported(mvm)) { + u32 hb_time = scan_timing[params->hb_type].max_out_time; + + duration = min_t(u32, duration, hb_time); + } + + if (req->duration_mandatory && req->duration > duration) { IWL_DEBUG_SCAN(mvm, "Measurement scan - too long dwell %hu (max out time %u)\n", req->duration, - scan_timing[params->type].max_out_time); + duration); return -EOPNOTSUPP; } - return min_t(u32, (u32)req->duration, - scan_timing[params->type].max_out_time); + return min_t(u32, (u32)req->duration, duration); } static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) @@ -1025,22 +1061,38 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, u32 flags, u8 channel_flags) { - enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); struct iwl_scan_config *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); - cfg->out_of_channel_time[0] = - cpu_to_le32(scan_timing[type].max_out_time); - cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time); if 
(iwl_mvm_is_cdb_supported(mvm)) { - cfg->suspend_time[1] = - cpu_to_le32(scan_timing[type].suspend_time); - cfg->out_of_channel_time[1] = + enum iwl_mvm_scan_type lb_type, hb_type; + + lb_type = iwl_mvm_get_scan_type_band(mvm, false, + NL80211_BAND_2GHZ); + hb_type = iwl_mvm_get_scan_type_band(mvm, false, + NL80211_BAND_5GHZ); + + cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(scan_timing[lb_type].max_out_time); + cfg->suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(scan_timing[lb_type].suspend_time); + + cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(scan_timing[hb_type].max_out_time); + cfg->suspend_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(scan_timing[hb_type].suspend_time); + } else { + enum iwl_mvm_scan_type type = + iwl_mvm_get_scan_type(mvm, false); + + cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[type].max_out_time); + cfg->suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(scan_timing[type].suspend_time); } iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); @@ -1060,7 +1112,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) struct iwl_host_cmd cmd = { .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), }; - enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); + enum iwl_mvm_scan_type type; + enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET; int num_channels = mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; @@ -1070,8 +1123,18 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; - if (type == mvm->scan_type) - return 0; + if (iwl_mvm_is_cdb_supported(mvm)) { + type = iwl_mvm_get_scan_type_band(mvm, false, + NL80211_BAND_2GHZ); + hb_type = iwl_mvm_get_scan_type_band(mvm, false, + NL80211_BAND_5GHZ); + if (type == mvm->scan_type && hb_type == mvm->hb_scan_type) + return 0; + } else { + type = iwl_mvm_get_scan_type(mvm, false); + if (type == mvm->scan_type) + return 0; + } if (iwl_mvm_has_new_tx_api(mvm)) cmd_size = sizeof(struct iwl_scan_config); @@ -1102,10 +1165,15 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) IWL_CHANNEL_FLAG_EBS_ADD | IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; + /* + * Check for fragmented scan on LMAC2 - high band. + * LMAC1 - low band is checked above. + */ if (iwl_mvm_has_new_tx_api(mvm)) { - flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ? - SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : - SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; + if (iwl_mvm_is_cdb_supported(mvm)) + flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ? 
+ SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : + SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); } else { iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags); @@ -1118,8 +1186,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); ret = iwl_mvm_send_cmd(mvm, &cmd); - if (!ret) + if (!ret) { mvm->scan_type = type; + mvm->hb_scan_type = hb_type; + } kfree(cfg); return ret; @@ -1173,7 +1243,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cpu_to_le32(timing->suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { - hb_timing = &scan_timing[params->type]; + hb_timing = &scan_timing[params->hb_type]; cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->max_out_time); @@ -1203,7 +1273,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; if (iwl_mvm_is_cdb_supported(mvm)) { - hb_timing = &scan_timing[params->type]; + hb_timing = &scan_timing[params->hb_type]; cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->max_out_time); @@ -1212,8 +1282,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, } if (iwl_mvm_has_new_tx_api(mvm)) { - cmd->v6.scan_priority = - cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = @@ -1257,11 +1326,12 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) { + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; - if (iwl_mvm_is_cdb_supported(mvm)) - flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; - } + + if (iwl_mvm_is_cdb_supported(mvm) && + params->hb_type == IWL_SCAN_TYPE_FRAGMENTED) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm) && fw_has_capa(&mvm->fw->ucode_capa, @@ -1492,6 +1562,21 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work) iwl_force_nmi(mvm->trans); } +static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params, + bool p2p) +{ + if (iwl_mvm_is_cdb_supported(mvm)) { + params->type = + iwl_mvm_get_scan_type_band(mvm, p2p, + NL80211_BAND_2GHZ); + params->hb_type = + iwl_mvm_get_scan_type_band(mvm, p2p, + NL80211_BAND_5GHZ); + } else { + params->type = iwl_mvm_get_scan_type(mvm, p2p); + } +} int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies) @@ -1539,9 +1624,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params.scan_plans = &scan_plan; params.n_scan_plans = 1; - params.type = - iwl_mvm_get_scan_type(mvm, - vif->type == NL80211_IFTYPE_P2P_DEVICE); + iwl_mvm_fill_scan_type(mvm, ¶ms, + vif->type == NL80211_IFTYPE_P2P_DEVICE); ret = iwl_mvm_get_measurement_dwell(mvm, req, ¶ms); if (ret < 0) @@ -1636,9 +1720,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.n_scan_plans = req->n_scan_plans; params.scan_plans = req->scan_plans; - params.type = - iwl_mvm_get_scan_type(mvm, - vif->type == NL80211_IFTYPE_P2P_DEVICE); + iwl_mvm_fill_scan_type(mvm, ¶ms, + vif->type == NL80211_IFTYPE_P2P_DEVICE); /* In theory, LMAC scans can handle a 32-bit delay, but since * waiting for over 18 hours to start the 
scan is a bit silly diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index ea0332a2389e..815d2d141fd8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1074,23 +1074,48 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_power_update_mac(mvm); } +struct iwl_mvm_low_latency_iter { + bool result; + bool result_per_band[NUM_NL80211_BANDS]; +}; + static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { - bool *result = _data; + struct iwl_mvm_low_latency_iter *result = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + enum nl80211_band band; + + if (iwl_mvm_vif_low_latency(mvmvif)) { + result->result = true; + + if (!mvmvif->phy_ctxt) + return; - if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif))) - *result = true; + band = mvmvif->phy_ctxt->channel->band; + result->result_per_band[band] = true; + } } bool iwl_mvm_low_latency(struct iwl_mvm *mvm) { - bool result = false; + struct iwl_mvm_low_latency_iter data = {}; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_ll_iter, &result); + iwl_mvm_ll_iter, &data); - return result; + return data.result; +} + +bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band) +{ + struct iwl_mvm_low_latency_iter data = {}; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_ll_iter, &data); + + return data.result_per_band[band]; } struct iwl_bss_iter_data { @@ -1599,6 +1624,18 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, iwl_mvm_uapsd_agg_disconnect_iter, &mac); } +static void iwl_mvm_tcm_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 *band = _data; + + if (!mvmvif->phy_ctxt) + return; + + band[mvmvif->id] = mvmvif->phy_ctxt->channel->band; +} + static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, unsigned long ts, bool handle_uapsd) @@ -1607,9 +1644,11 @@ static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, unsigned int uapsd_elapsed = jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts); u32 total_airtime = 0; - int ac, mac; + u32 band_airtime[NUM_NL80211_BANDS] = {0}; + u32 band[NUM_MAC_INDEX_DRIVER] = {0}; + int ac, mac, i; bool low_latency = false; - enum iwl_mvm_traffic_load load; + enum iwl_mvm_traffic_load load, band_load; bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD); if (handle_ll) @@ -1619,12 +1658,18 @@ static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, mvm->tcm.result.elapsed = elapsed; + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tcm_iterator, + &band); + for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; u32 vo_vi_pkts = 0; u32 airtime = mdata->rx.airtime + mdata->tx.airtime; total_airtime += airtime; + band_airtime[band[mac]] += airtime; load = iwl_mvm_tcm_load(mvm, airtime, elapsed); mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac]; @@ -1662,6 +1707,11 @@ static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, mvm->tcm.result.global_change = load != mvm->tcm.result.global_load; mvm->tcm.result.global_load = load; + for (i = 0; i < NUM_NL80211_BANDS; i++) { + band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed); + 
mvm->tcm.result.band_load[i] = band_load; + } + /* * If the current load isn't low we need to force re-evaluation * in the TCM period, so that we can return to low load if there -- cgit v1.2.3 From 622111a2d21baca39bb8d5cf56c2d509e50fe278 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 8 Jun 2017 09:07:11 +0200 Subject: iwlwifi: mvm: clean up scan capability checks Introduce and use iwl_mvm_cdb_scan_api(), which checks the family. Most of this will go away once the 22000 firmware supports adaptive dwell, after which the V6 scan API won't be used, but the V3 scan *config* API will still need to be distinguished. In any case, this gets rid of the completely bogus has_new_tx_api() checks. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 10 ++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 15 ++++++++------- 2 files changed, 18 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2150b785387f..2411cc91f833 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1356,6 +1356,16 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_CDB_SUPPORT); } +static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm) +{ + /* + * TODO: should this be the same as iwl_mvm_is_cdb_supported()? + * but then there's a little bit of code in scan that won't make + * any sense... + */ + return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000; +} + static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 8fcd92dcaaa7..8c5f4f96d6d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -147,7 +147,7 @@ static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) if (iwl_mvm_is_adaptive_dwell_supported(mvm)) return (void *)&cmd->v7.data; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_cdb_scan_api(mvm)) return (void *)&cmd->v6.data; return (void *)&cmd->v1.data; @@ -164,7 +164,7 @@ iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm) if (iwl_mvm_is_adaptive_dwell_supported(mvm)) return &cmd->v7.channel; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_cdb_scan_api(mvm)) return &cmd->v6.channel; return &cmd->v1.channel; @@ -1136,7 +1136,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) return 0; } - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_cdb_scan_api(mvm)) cmd_size = sizeof(struct iwl_scan_config); else cmd_size = sizeof(struct iwl_scan_config_v1); @@ -1169,7 +1169,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) * Check for fragmented scan on LMAC2 - high band. * LMAC1 - low band is checked above. */ - if (iwl_mvm_has_new_tx_api(mvm)) { + if (iwl_mvm_cdb_scan_api(mvm)) { if (iwl_mvm_is_cdb_supported(mvm)) flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ? 
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : @@ -1281,8 +1281,9 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cpu_to_le32(hb_timing->suspend_time); } - if (iwl_mvm_has_new_tx_api(mvm)) { - cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + if (iwl_mvm_cdb_scan_api(mvm)) { + cmd->v6.scan_priority = + cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = @@ -1909,7 +1910,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) base_size = IWL_SCAN_REQ_UMAC_SIZE_V8; else if (iwl_mvm_is_adaptive_dwell_supported(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; - else if (iwl_mvm_has_new_tx_api(mvm)) + else if (iwl_mvm_cdb_scan_api(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) -- cgit v1.2.3 From cf58c9e0915aad684aa8e9b82afd260c67ee7b22 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 13 Apr 2018 13:17:16 +0300 Subject: iwlwifi: mvm: fix OOC priority in scans The code that sets the correct out-of-channel priority depending on the scan type was accidentally removed during a rebase. Add it back. Fixes: c1a7515393e4 ("iwlwifi: mvm: add adaptive dwell support") Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 8c5f4f96d6d4..4b3753d78d03 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1297,6 +1297,11 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cpu_to_le32(timing->suspend_time); } } + + if (iwl_mvm_is_regular_scan(params)) + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + else + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } static void -- cgit v1.2.3 From 84226ca1c5d34e6a8492d12848e9cdf7752b834b Mon Sep 17 00:00:00 2001 From: Gregory Greenman Date: Thu, 2 Nov 2017 04:07:52 +0200 Subject: iwlwifi: mvm: support offload of AMSDU rate control Support the new APIs and activate AMSDU based on the offloaded TLC decisions. 
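As an illustrative sketch only (the helper name below is invented for this example and is not part of the patch): once iwl_mvm_tlc_amsdu_notif() has stored the firmware's decision, the TX path can gate A-MSDU building per TID on the notified bitmap, along these lines:

    /* Sketch only: amsdu_enabled is the per-TID bitmap from
     * TLC_MNG_AMSDU_ENABLE_NOTIF; max_amsdu_len was clamped to
     * sta->max_amsdu_len when the notification was handled. Without
     * TLC offload, amsdu_enabled is simply 0xFFFF or 0.
     */
    static bool iwl_mvm_amsdu_allowed(struct iwl_mvm_sta *mvmsta, u8 tid)
    {
            return mvmsta->amsdu_enabled & BIT(tid);
    }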
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/fw/api/datapath.h | 5 +++ drivers/net/wireless/intel/iwlwifi/fw/api/rs.h | 18 ++++++++-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 8 +++-- drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 42 +++++++++++++++++++++- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 15 +++++--- drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 5 ++- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 7 ++-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 11 ++++-- 8 files changed, 96 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index a57c7223df0f..184cee98c359 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -92,6 +92,11 @@ enum iwl_data_path_subcmd_ids { */ TLC_MNG_NOTIF_REQ_CMD = 0x10, + /** + * @TLC_MNG_AMSDU_ENABLE_NOTIF: &struct iwl_tlc_amsdu_notif + */ + TLC_MNG_AMSDU_ENABLE_NOTIF = 0xF6, + /** * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index e49a6f7be613..1a8f3154a1fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -205,7 +205,7 @@ enum iwl_tlc_mng_ht_rates { * @non_ht_supp_rates: bitmap of supported legacy rates * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9 * @mode: &enum iwl_tlc_mng_cfg_mode - * @reserved2: reserved + * @amsdu: TX amsdu is supported * @he_supp_rates: bitmap of supported HE rates * @sgi_ch_width_supp: bitmap of SGI support per channel width * @he_gi_support: 11ax HE guard interval @@ -222,13 +222,27 @@ struct iwl_tlc_config_cmd { __le16 non_ht_supp_rates; __le16 ht_supp_rates[MAX_RS_ANT_NUM]; u8 mode; - u8 reserved2; + u8 amsdu; __le16 he_supp_rates; u8 sgi_ch_width_supp; u8 he_gi_support; __le32 max_ampdu_cnt; } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */ +/** + * struct iwl_tlc_amsdu_notif - TLC AMSDU configuration + * @sta_id: station id + * @reserved: reserved + * @amsdu_size: Max AMSDU size, in bytes + * @amsdu_enabled: bitmap for per-TID AMSDU enablement + */ +struct iwl_tlc_amsdu_notif { + u8 sta_id; + u8 reserved[3]; + __le16 amsdu_size; + __le16 amsdu_enabled; +} __packed; /* TLC_MNG_AMSDU_ENABLE_NTFY_API_S_VER_1 */ + #define IWL_TLC_NOTIF_INIT_RATE_POS 0 #define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS) #define IWL_TLC_NOTIF_REQ_INTERVAL (500) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 6a3f557543e4..1ef4ac21f32a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -250,6 +250,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC), + RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, + iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC), + RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, @@ -309,6 +312,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF, iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC), + 
RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_AMSDU_ENABLE_NOTIF, + iwl_mvm_tlc_amsdu_notif, RX_HANDLER_SYNC), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -445,6 +450,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), + HCMD_NAME(TLC_MNG_AMSDU_ENABLE_NOTIF), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), @@ -1034,8 +1040,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_queue_notif(mvm, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); - else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF)) - iwl_mvm_tlc_update_notif(mvm, pkt); else iwl_mvm_rx_common(mvm, rxb, pkt); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index fb5745660509..4e818bce469b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -228,8 +228,47 @@ static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id) IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret); } -void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) +void iwl_mvm_tlc_amsdu_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_tlc_amsdu_notif *notif; + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + u16 size; + + notif = (void *)pkt->data; + + if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) + return; + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); + if (IS_ERR_OR_NULL(sta)) { + rcu_read_unlock(); + IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", + notif->sta_id); + return; + } + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + size = min(le16_to_cpu(notif->amsdu_size), sta->max_amsdu_len); + mvmsta->amsdu_enabled = le16_to_cpu(notif->amsdu_enabled); + mvmsta->max_amsdu_len = size; + + IWL_DEBUG_RATE(mvm, + "AMSDU notification. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", + le16_to_cpu(notif->amsdu_size), size, + mvmsta->amsdu_enabled); + + rcu_read_unlock(); +}; + +void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tlc_update_notif *notif; struct iwl_mvm_sta *mvmsta; struct iwl_lq_sta_rs_fw *lq_sta; @@ -273,6 +312,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, .max_supp_ss = sta->rx_nss, .max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize), .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), + .amsdu = iwl_mvm_is_csum_supported(mvm), }; int ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 5d776ec1840f..af7bc3b7b0c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1715,12 +1715,18 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + /* + * In case TLC offload is not active amsdu_enabled is either 0xFFFF + * or 0, since there is no per-TID alg. 
+ */ if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) || tbl->rate.index < IWL_RATE_MCS_5_INDEX || scale_action == RS_ACTION_DOWNSCALE) - mvmsta->tlc_amsdu = false; + mvmsta->amsdu_enabled = 0; else - mvmsta->tlc_amsdu = true; + mvmsta->amsdu_enabled = 0xFFFF; + + mvmsta->max_amsdu_len = sta->max_amsdu_len; } /* @@ -3134,7 +3140,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, sband = hw->wiphy->bands[band]; lq_sta->lq.sta_id = mvmsta->sta_id; - mvmsta->tlc_amsdu = false; + mvmsta->amsdu_enabled = 0; + mvmsta->max_amsdu_len = sta->max_amsdu_len; for (j = 0; j < LQ_SIZE; j++) rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); @@ -3744,7 +3751,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, (rate->sgi) ? "SGI" : "NGI", (rate->ldpc) ? "LDPC" : "BCC", (lq_sta->is_agg) ? "AGG on" : "", - (mvmsta->tlc_amsdu) ? "AMSDU on" : ""); + (mvmsta->amsdu_enabled) ? "AMSDU on" : ""); } desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n", lq_sta->last_rate_n_flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index fb18cb8c233d..736ac9642921 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -454,5 +454,8 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_band band); int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); -void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); +void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_tlc_amsdu_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); #endif /* __rs__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 5ffd6adbc383..60502c81dfce 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -391,7 +391,9 @@ struct iwl_mvm_rxq_dup_data { * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: is thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? - * @tlc_amsdu: true if A-MSDU is allowed + * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs. + * In case TLC offload is not active it is either 0xFFFF or 0. + * @max_amsdu_len: max AMSDU length * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) * @sleep_tx_count: the number of frames that we told the firmware to let out * even when that station is asleep. 
This is useful in case the queue @@ -436,7 +438,8 @@ struct iwl_mvm_sta { bool tt_tx_protection; bool disable_tx; - bool tlc_amsdu; + u16 amsdu_enabled; + u16 max_amsdu_len; bool sleeping; bool associated; u8 agg_tids; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index f06a0ee7be28..90381348697c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -774,9 +774,9 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len); - if (!sta->max_amsdu_len || + if (!mvmsta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || - (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len) + (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len)) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* @@ -803,7 +803,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); - max_amsdu_len = sta->max_amsdu_len; + if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) || + tid_to_mac80211_ac[tid] < IEEE80211_AC_BE || + !(mvmsta->amsdu_enabled & BIT(tid))) + return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); + + max_amsdu_len = mvmsta->max_amsdu_len; /* the Tx FIFO to which this A-MSDU will be routed */ txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]); -- cgit v1.2.3 From f79b8f9dc77238f03d7fffd8f34976cfc7b076b5 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 25 Dec 2017 15:15:06 +0200 Subject: iwlwifi: pcie: implement the overflow queue for Gen2 devices When we enable TSO, the operation mode can have a lot of packets that will be pushed to the transport regardless of how full the queue is. To cope with that, the transport can buffer those packets and add them to the ring later, when there is more room. This implementation was missing in the Gen2 devices' code.
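As a standalone illustration of the buffering scheme described above (a minimal sketch with invented names, not the transport code): frames that arrive while the ring is nearly full are parked on a software list and moved onto the ring later, from the completion path, once room frees up. Ordering of the parked frames is ignored here for brevity.

#define RING_SIZE 64    /* power of two, for the masking below */
#define HIGH_MARK  8    /* stop taking new frames when less room than this */

struct tx_ring {
        void *slot[RING_SIZE];
        unsigned int write, read;       /* free-running indices */
        void *overflow[RING_SIZE];      /* parked frames */
        unsigned int n_overflow;
};

static unsigned int ring_space(const struct tx_ring *r)
{
        return RING_SIZE - (r->write - r->read);
}

/* Queue a frame; park it instead of failing when the ring is almost full. */
static int ring_tx(struct tx_ring *r, void *frame)
{
        if (ring_space(r) < HIGH_MARK) {
                if (r->n_overflow == RING_SIZE)
                        return -1;                      /* truly out of room */
                r->overflow[r->n_overflow++] = frame;   /* parked for later */
                return 0;
        }
        r->slot[r->write % RING_SIZE] = frame;
        r->write++;
        return 0;
}

/* Completion path: a slot was freed; drain parked frames while room lasts. */
static void ring_complete(struct tx_ring *r)
{
        r->read++;
        while (r->n_overflow && ring_space(r) >= HIGH_MARK) {
                r->slot[r->write % RING_SIZE] = r->overflow[--r->n_overflow];
                r->write++;
        }
}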
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 27 ++++++++++++++++++++--- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2 +- 2 files changed, 25 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index fabae0f60683..b605b364ed3f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -488,6 +488,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); + if (iwl_queue_space(txq) < txq->high_mark) { + iwl_stop_queue(trans, txq); + + /* don't put the packet on the ring, if there is no room */ + if (unlikely(iwl_queue_space(txq) < 3)) { + struct iwl_device_cmd **dev_cmd_ptr; + + dev_cmd_ptr = (void *)((u8 *)skb->cb + + trans_pcie->dev_cmd_offs); + + *dev_cmd_ptr = dev_cmd; + __skb_queue_tail(&txq->overflow_q, skb); + spin_unlock(&txq->lock); + return 0; + } + } + idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); /* Set up driver data for this TFD */ @@ -523,9 +540,6 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, /* Tell device the write index *just past* this latest filled TFD */ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); - if (iwl_queue_space(txq) < txq->high_mark) - iwl_stop_queue(trans, txq); - /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. @@ -957,6 +971,13 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } } + + while (!skb_queue_empty(&txq->overflow_q)) { + struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); + + iwl_op_mode_free_skb(trans->op_mode, skb); + } + spin_unlock_bh(&txq->lock); /* just in case - this queue may have been stopped */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 1a566287993d..86873b99e5d7 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1166,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, * In that case, iwl_queue_space will be small again * and we won't wake mac80211's queue. */ - iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id); + iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); } spin_lock_bh(&txq->lock); -- cgit v1.2.3 From 422b5dd4294c3c5a3d88c77041f1fceff1d2e4ce Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Wed, 27 Dec 2017 22:10:28 +0200 Subject: iwlwifi: move timestamp functions from debugfs.h to dbg.h These functions are not debugfs functions, so they should be in dbg.h instead of in debugfs.h.
Signed-off-by: Haim Dreyfuss Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 36 +++++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/fw/debugfs.c | 1 + drivers/net/wireless/intel/iwlwifi/fw/debugfs.h | 31 --------------------- 3 files changed, 37 insertions(+), 31 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 72259bff9922..507d9a49fa97 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -227,4 +227,40 @@ static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt) cancel_delayed_work_sync(&fwrt->dump.wk); } +#ifdef CONFIG_IWLWIFI_DEBUGFS +static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) +{ + fwrt->timestamp.delay = 0; + cancel_delayed_work_sync(&fwrt->timestamp.wk); +} + +void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay); + +static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) +{ + cancel_delayed_work_sync(&fwrt->timestamp.wk); +} + +static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) +{ + if (!fwrt->timestamp.delay) + return; + + schedule_delayed_work(&fwrt->timestamp.wk, + round_jiffies_relative(fwrt->timestamp.delay)); +} + +#else + +static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} + +static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, + u32 delay) {} + +static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {} + +static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} + +#endif /* CONFIG_IWLWIFI_DEBUGFS */ + #endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 8f005cd69559..8ba5a60ec9ed 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -64,6 +64,7 @@ *****************************************************************************/ #include "api/commands.h" #include "debugfs.h" +#include "dbg.h" #define FWRT_DEBUGFS_READ_FILE_OPS(name) \ static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h index d93f6a4bb22d..cbbfa8e9e66d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h @@ -69,28 +69,6 @@ int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir); -static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) -{ - fwrt->timestamp.delay = 0; - cancel_delayed_work_sync(&fwrt->timestamp.wk); -} - -static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) -{ - cancel_delayed_work_sync(&fwrt->timestamp.wk); -} - -static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) -{ - if (!fwrt->timestamp.delay) - return; - - schedule_delayed_work(&fwrt->timestamp.wk, - round_jiffies_relative(fwrt->timestamp.delay)); -} - -void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay); - #else static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) @@ -98,13 +76,4 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, return 0; } -static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} - -static inline void iwl_fw_suspend_timestamp(struct 
iwl_fw_runtime *fwrt) {} - -static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} - -static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, - u32 delay) {} - #endif /* CONFIG_IWLWIFI_DEBUGFS */ -- cgit v1.2.3 From e0498146c8792b1ebc321c0e29e633b6a0918a6e Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 4 Jan 2018 13:53:55 +0200 Subject: iwlwifi: pcie: allocate shorter TX queues for 22000 devices When support for shorter TX queues was introduced, it didn't include the actual allocation of a shorter queue, which was the main motivation for the change. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index b605b364ed3f..734a5fc3584d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -993,7 +993,7 @@ static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, - trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, + trans_pcie->tfd_size * txq->n_window, txq->tfds, txq->dma_addr); dma_free_coherent(dev, sizeof(*txq->first_tb_bufs) * txq->n_window, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 86873b99e5d7..4493520aae1e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -495,6 +495,9 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; + if (trans->cfg->use_tfh) + tfd_sz = trans_pcie->tfd_size * slots_num; + timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0); txq->trans_pcie = trans_pcie; -- cgit v1.2.3 From 01302f5b4a5e5791b3938ab8d3216232b1830b90 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 4 Jan 2018 13:47:09 +0200 Subject: iwlwifi: Revert "iwlwifi: pcie: dynamic Tx command queue size" This reverts commit dd05f9aab4426ff178b12d601e50d19d336eba30. Support for shorter TX queues was eventually added without the need for the parameters this patch added.
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 1 - drivers/net/wireless/intel/iwlwifi/iwl-config.h | 3 --- .../net/wireless/intel/iwlwifi/pcie/ctxt-info.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 3 --- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 8 ++------ drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 23 ++-------------------- 6 files changed, 5 insertions(+), 35 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index dffd9df782b0..f2b6e0e8d787 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -137,7 +137,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .gen2 = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ - .tx_cmd_queue_size = 32, \ .min_umac_error_event_table = 0x400000 const struct iwl_cfg iwl22000_2ac_cfg_hr = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f0f5636dd3ea..687bed30c851 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -333,8 +333,6 @@ struct iwl_pwr_tx_backoff { * @gen2: 22000 and on transport operation * @cdb: CDB support * @nvm_type: see &enum iwl_nvm_type - * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as - * the regular queues * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -386,7 +384,6 @@ struct iwl_cfg { gen2:1, cdb:1, dbgc_supported:1; - u16 tx_cmd_queue_size; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 5ef216f3a60b..3fc4343581ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -244,7 +244,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, ctxt_info->hcmd_cfg.cmd_queue_addr = cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = - TFD_QUEUE_CB_SIZE(trans_pcie->tx_cmd_queue_size); + TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS); /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index ca3b64ff4dad..a8833028e1cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -383,7 +383,6 @@ struct iwl_self_init_dram { * @hw_init_mask: initial unmasked hw causes * @fh_mask: current unmasked fh causes * @hw_mask: current unmasked hw causes - * @tx_cmd_queue_size: the size of the tx command queue */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -465,7 +464,6 @@ struct iwl_trans_pcie { u32 fh_mask; u32 hw_mask; cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES]; - u16 tx_cmd_queue_size; }; static inline struct iwl_trans_pcie * @@ -537,7 +535,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); -void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans); static inline u16 
iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd, u8 idx) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 734a5fc3584d..7bdb9c9e04b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1173,8 +1173,6 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) struct iwl_txq *cmd_queue; int txq_id = trans_pcie->cmd_queue, ret; - iwl_pcie_set_tx_cmd_queue_size(trans); - /* alloc and init the command queue */ if (!trans_pcie->txq[txq_id]) { cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL); @@ -1183,8 +1181,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) return -ENOMEM; } trans_pcie->txq[txq_id] = cmd_queue; - ret = iwl_pcie_txq_alloc(trans, cmd_queue, - trans_pcie->tx_cmd_queue_size, true); + ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; @@ -1193,8 +1190,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) cmd_queue = trans_pcie->txq[txq_id]; } - ret = iwl_pcie_txq_init(trans, cmd_queue, - trans_pcie->tx_cmd_queue_size, true); + ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 4493520aae1e..cf468b9f5a82 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -953,8 +953,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); - slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size : - TFD_TX_CMD_SLOTS; + slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); @@ -973,21 +972,6 @@ error: return ret; } -void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int queue_size = TFD_CMD_SLOTS; - - if (trans->cfg->tx_cmd_queue_size) - queue_size = trans->cfg->tx_cmd_queue_size; - - if (WARN_ON(!(is_power_of_2(queue_size) && - TFD_QUEUE_CB_SIZE(queue_size) > 0))) - trans_pcie->tx_cmd_queue_size = TFD_CMD_SLOTS; - else - trans_pcie->tx_cmd_queue_size = queue_size; -} - int iwl_pcie_tx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -995,8 +979,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) int txq_id, slots_num; bool alloc = false; - iwl_pcie_set_tx_cmd_queue_size(trans); - if (!trans_pcie->txq_memory) { ret = iwl_pcie_tx_alloc(trans); if (ret) @@ -1020,8 +1002,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); - slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size : - TFD_TX_CMD_SLOTS; + slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); if (ret) { -- cgit v1.2.3 From 5369774c84eac674af1741c4322ad7728694d80b Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 4 Jan 2018 14:17:06 +0200 Subject: iwlwifi: add TX queue size parameter to TX queue allocation As preparation for dynamic queue sizing, add a parameter of the TX queue size to the dynamic queue allocation op mode API. 
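In the diff below, the op mode now hands an explicit entry count through the transport op. A small self-contained sketch of the same API shape (hypothetical names, not the iwl_trans API), where existing callers keep the old behaviour by passing a default size:

#include <stdlib.h>

#define DEFAULT_QUEUE_SIZE 256

struct txq {
        unsigned int id;
        unsigned int n_entries; /* ring size requested by the caller */
};

/* Hypothetical allocator: 'size' is now an explicit parameter. */
static struct txq *txq_alloc(unsigned int queue_id, unsigned int size)
{
        struct txq *q = calloc(1, sizeof(*q));

        if (!q)
                return NULL;
        q->id = queue_id;
        q->n_entries = size;
        return q;
}

/* Existing callers simply pass the default size. */
static struct txq *txq_alloc_default(unsigned int queue_id)
{
        return txq_alloc(queue_id, DEFAULT_QUEUE_SIZE);
}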
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/txq.h | 1 + drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 8 ++++---- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 3 ++- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 2 +- 5 files changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index dfa111bb411e..1f45b2ce5295 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -131,6 +131,7 @@ enum iwl_tx_queue_cfg_actions { TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), }; +#define IWL_DEFAULT_QUEUE_SIZE 256 /** * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command * @sta_id: station id diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index c25ed1a0bbb0..a92832711683 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -554,7 +554,7 @@ struct iwl_trans_ops { /* 22000 functions */ int (*txq_alloc)(struct iwl_trans *trans, struct iwl_tx_queue_cfg_cmd *cmd, - int cmd_id, + int cmd_id, int size, unsigned int queue_wdg_timeout); void (*txq_free)(struct iwl_trans *trans, int queue); @@ -952,8 +952,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue) static inline int iwl_trans_txq_alloc(struct iwl_trans *trans, struct iwl_tx_queue_cfg_cmd *cmd, - int cmd_id, - unsigned int queue_wdg_timeout) + int cmd_id, int size, + unsigned int wdg_timeout) { might_sleep(); @@ -965,7 +965,7 @@ iwl_trans_txq_alloc(struct iwl_trans *trans, return -EIO; } - return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout); + return trans->ops->txq_alloc(trans, cmd, cmd_id, size, wdg_timeout); } static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 815d2d141fd8..81c02d6e3d82 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -733,7 +733,8 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, if (cmd.tid == IWL_MAX_TID_COUNT) cmd.tid = IWL_MGMT_TID; queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd, - SCD_QUEUE_CFG, timeout); + SCD_QUEUE_CFG, IWL_DEFAULT_QUEUE_SIZE, + timeout); if (queue < 0) { IWL_DEBUG_TX_QUEUES(mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index a8833028e1cf..cda66340d357 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -819,7 +819,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, struct iwl_tx_queue_cfg_cmd *cmd, - int cmd_id, + int cmd_id, int size, unsigned int timeout); void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 7bdb9c9e04b1..b3d151ec8fe4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ 
b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1041,7 +1041,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, struct iwl_tx_queue_cfg_cmd *cmd, - int cmd_id, + int cmd_id, int size, unsigned int timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -- cgit v1.2.3 From 3cfb6de73df3bceaf1942297659177ac50267852 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 4 Jan 2018 14:02:51 +0200 Subject: iwlwifi: pcie: use the queue size as sent by opmode The op mode will begin to use varying TX queue sizes. All the infra is in place, allow it. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index b3d151ec8fe4..a235dda1f70b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1067,12 +1067,12 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, return -ENOMEM; } - ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false); + ret = iwl_pcie_txq_alloc(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue alloc failed\n"); goto error; } - ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false); + ret = iwl_pcie_txq_init(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue init failed\n"); goto error; @@ -1082,7 +1082,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, cmd->tfdq_addr = cpu_to_le64(txq->dma_addr); cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS)); + cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) -- cgit v1.2.3 From 251985c92865bc68a0b17a6409a640914f3ab1ba Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 4 Jan 2018 14:06:07 +0200 Subject: iwlwifi: mvm: use shorter queues for mgmt and auxiliary queues In 22000 devices, aka gen2, the TFS is 256 bytes. In order to save memory, use shorter TX queues for the aux and mgmt queues, since there isn't much traffic on them.
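Taking the 256-byte descriptor mentioned above, a 256-entry data queue needs 64 KiB of descriptor memory while a 16-entry management queue needs only 4 KiB, which is where the saving comes from. A tiny sketch of the size selection (the helper is invented; the two constants match the txq.h diff below):

#define IWL_DEFAULT_QUEUE_SIZE 256      /* regular data queues */
#define IWL_MGMT_QUEUE_SIZE     16      /* management/aux queues */

/* Pick a ring size based on whether the queue carries management traffic. */
static unsigned int pick_queue_size(int is_mgmt_or_aux)
{
        return is_mgmt_or_aux ? IWL_MGMT_QUEUE_SIZE : IWL_DEFAULT_QUEUE_SIZE;
}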
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/txq.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 1f45b2ce5295..6ac240b6eace 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -132,6 +132,7 @@ enum iwl_tx_queue_cfg_actions { }; #define IWL_DEFAULT_QUEUE_SIZE 256 +#define IWL_MGMT_QUEUE_SIZE 16 /** * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command * @sta_id: station id diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 81c02d6e3d82..e597bc193e5c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -728,13 +728,14 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, .sta_id = sta_id, .tid = tid, }; - int queue; + int queue, size = IWL_DEFAULT_QUEUE_SIZE; - if (cmd.tid == IWL_MAX_TID_COUNT) + if (cmd.tid == IWL_MAX_TID_COUNT) { cmd.tid = IWL_MGMT_TID; + size = IWL_MGMT_QUEUE_SIZE; + } queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd, - SCD_QUEUE_CFG, IWL_DEFAULT_QUEUE_SIZE, - timeout); + SCD_QUEUE_CFG, size, timeout); if (queue < 0) { IWL_DEBUG_TX_QUEUES(mvm, -- cgit v1.2.3 From 9c4f7d5127402294b50c9ff18fad66639f3b81d0 Mon Sep 17 00:00:00 2001 From: Shaul Triebitz Date: Sun, 14 Jan 2018 19:06:09 +0200 Subject: iwlwifi: move all NVM parsing code to the common files Move all the NVM file handling code to iwl-nvm-parse.c where all this stuff belongs. This cleans up the MVM specific code and allows easier reuse by other opmodes if needed. Signed-off-by: Shaul Triebitz Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 214 ++++++++++++++++++++- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 23 +++ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 7 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 15 +- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 208 +------------------- 5 files changed, 247 insertions(+), 220 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 8928613e033e..3437ed480e31 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,7 @@ #include #include #include +#include #include "iwl-drv.h" #include "iwl-modparams.h" @@ -76,6 +79,7 @@ #include "iwl-io.h" #include "iwl-csr.h" #include "fw/acpi.h" +#include "fw/api/nvm-reg.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { @@ -146,8 +150,8 @@ static const u8 iwl_ext_nvm_channels[] = { 149, 153, 157, 161, 165, 169, 173, 177, 181 }; -#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) -#define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) +#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) +#define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) #define NUM_2GHZ_CHANNELS 14 #define NUM_2GHZ_CHANNELS_EXT 14 #define FIRST_2GHZ_HT_MINUS 5 @@ -301,11 +305,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, const u8 *nvm_chan; if (cfg->nvm_type != IWL_NVM_EXT) { - num_of_ch = IWL_NUM_CHANNELS; + num_of_ch = IWL_NVM_NUM_CHANNELS; nvm_chan = &iwl_nvm_channels[0]; num_2ghz_channels = NUM_2GHZ_CHANNELS; } else { - num_of_ch = IWL_NUM_CHANNELS_EXT; + num_of_ch = IWL_NVM_NUM_CHANNELS_EXT; nvm_chan = &iwl_ext_nvm_channels[0]; num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT; } @@ -720,12 +724,12 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (cfg->nvm_type != IWL_NVM_EXT) data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * - IWL_NUM_CHANNELS, + IWL_NVM_NUM_CHANNELS, GFP_KERNEL); else data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * - IWL_NUM_CHANNELS_EXT, + IWL_NVM_NUM_CHANNELS_EXT, GFP_KERNEL); if (!data) return NULL; @@ -859,7 +863,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int valid_rules = 0; bool new_rule; int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? - IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; + IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) return ERR_PTR(-EINVAL); @@ -938,3 +942,199 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, return regd; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); + +#define IWL_MAX_NVM_SECTION_SIZE 0x1b58 +#define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc +#define MAX_NVM_FILE_LEN 16384 + +void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, + unsigned int len) +{ +#define IWL_4165_DEVICE_ID 0x5501 +#define NVM_SKU_CAP_MIMO_DISABLE BIT(5) + + if (section == NVM_SECTION_TYPE_PHY_SKU && + hw_id == IWL_4165_DEVICE_ID && data && len >= 5 && + (data[4] & NVM_SKU_CAP_MIMO_DISABLE)) + /* OTP 0x52 bug work around: it's a 1x1 device */ + data[3] = ANT_B | (ANT_B << 4); +} +IWL_EXPORT_SYMBOL(iwl_nvm_fixups); + +/* + * Reads external NVM from a file into mvm->nvm_sections + * + * HOW TO CREATE THE NVM FILE FORMAT: + * ------------------------------ + * 1. create hex file, format: + * 3800 -> header + * 0000 -> header + * 5a40 -> data + * + * rev - 6 bit (word1) + * len - 10 bit (word1) + * id - 4 bit (word2) + * rsv - 12 bit (word2) + * + * 2. flip 8bits with 8 bits per line to get the right NVM file format + * + * 3. create binary file from the hex file + * + * 4. 
save as "iNVM_xxx.bin" under /lib/firmware + */ +int iwl_read_external_nvm(struct iwl_trans *trans, + const char *nvm_file_name, + struct iwl_nvm_section *nvm_sections) +{ + int ret, section_size; + u16 section_id; + const struct firmware *fw_entry; + const struct { + __le16 word1; + __le16 word2; + u8 data[]; + } *file_sec; + const u8 *eof; + u8 *temp; + int max_section_size; + const __le32 *dword_buff; + +#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) +#define NVM_WORD2_ID(x) (x >> 12) +#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8)) +#define EXT_NVM_WORD1_ID(x) ((x) >> 4) +#define NVM_HEADER_0 (0x2A504C54) +#define NVM_HEADER_1 (0x4E564D2A) +#define NVM_HEADER_SIZE (4 * sizeof(u32)) + + IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n"); + + /* Maximal size depends on NVM version */ + if (trans->cfg->nvm_type != IWL_NVM_EXT) + max_section_size = IWL_MAX_NVM_SECTION_SIZE; + else + max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; + + /* + * Obtain NVM image via request_firmware. Since we already used + * request_firmware_nowait() for the firmware binary load and only + * get here after that we assume the NVM request can be satisfied + * synchronously. + */ + ret = request_firmware(&fw_entry, nvm_file_name, trans->dev); + if (ret) { + IWL_ERR(trans, "ERROR: %s isn't available %d\n", + nvm_file_name, ret); + return ret; + } + + IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n", + nvm_file_name, fw_entry->size); + + if (fw_entry->size > MAX_NVM_FILE_LEN) { + IWL_ERR(trans, "NVM file too large\n"); + ret = -EINVAL; + goto out; + } + + eof = fw_entry->data + fw_entry->size; + dword_buff = (__le32 *)fw_entry->data; + + /* some NVM file will contain a header. + * The header is identified by 2 dwords header as follow: + * dword[0] = 0x2A504C54 + * dword[1] = 0x4E564D2A + * + * This header must be skipped when providing the NVM data to the FW. 
+ */ + if (fw_entry->size > NVM_HEADER_SIZE && + dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && + dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { + file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); + IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); + IWL_INFO(trans, "NVM Manufacturing date %08X\n", + le32_to_cpu(dword_buff[3])); + + /* nvm file validation, dword_buff[2] holds the file version */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 && + CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP && + le32_to_cpu(dword_buff[2]) < 0xE4A) { + ret = -EFAULT; + goto out; + } + } else { + file_sec = (void *)fw_entry->data; + } + + while (true) { + if (file_sec->data > eof) { + IWL_ERR(trans, + "ERROR - NVM file too short for section header\n"); + ret = -EINVAL; + break; + } + + /* check for EOF marker */ + if (!file_sec->word1 && !file_sec->word2) { + ret = 0; + break; + } + + if (trans->cfg->nvm_type != IWL_NVM_EXT) { + section_size = + 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); + section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); + } else { + section_size = 2 * EXT_NVM_WORD2_LEN( + le16_to_cpu(file_sec->word2)); + section_id = EXT_NVM_WORD1_ID( + le16_to_cpu(file_sec->word1)); + } + + if (section_size > max_section_size) { + IWL_ERR(trans, "ERROR - section too large (%d)\n", + section_size); + ret = -EINVAL; + break; + } + + if (!section_size) { + IWL_ERR(trans, "ERROR - section empty\n"); + ret = -EINVAL; + break; + } + + if (file_sec->data + section_size > eof) { + IWL_ERR(trans, + "ERROR - NVM file too short for section (%d bytes)\n", + section_size); + ret = -EINVAL; + break; + } + + if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, + "Invalid NVM section ID %d\n", section_id)) { + ret = -EINVAL; + break; + } + + temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); + if (!temp) { + ret = -ENOMEM; + break; + } + + iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size); + + kfree(nvm_sections[section_id].data); + nvm_sections[section_id].data = temp; + nvm_sections[section_id].length = section_size; + + /* advance to the next section */ + file_sec = (void *)(file_sec->data + section_size); + } +out: + release_firmware(fw_entry); + return ret; +} +IWL_EXPORT_SYMBOL(iwl_read_external_nvm); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 306736c7a042..6b108cae61b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -109,4 +111,25 @@ struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc); +/** + * struct iwl_nvm_section - describes an NVM section in memory. + * + * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, + * and saved for later use by the driver. 
Not all NVM sections are saved + * this way, only the needed ones. + */ +struct iwl_nvm_section { + u16 length; + const u8 *data; +}; + +/** + * iwl_read_external_nvm - Reads external NVM from a file into nvm_sections + */ +int iwl_read_external_nvm(struct iwl_trans *trans, + const char *nvm_file_name, + struct iwl_nvm_section *nvm_sections); +void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, + unsigned int len); + #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 4cf3e32e3dba..d5a612add59f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -79,6 +81,8 @@ #include "mvm.h" #include "fw/dbg.h" #include "iwl-phy-db.h" +#include "iwl-modparams.h" +#include "iwl-nvm-parse.h" #define MVM_UCODE_ALIVE_TIMEOUT HZ #define MVM_UCODE_CALIB_TIMEOUT (2*HZ) @@ -381,7 +385,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) /* Load NVM to NIC if needed */ if (mvm->nvm_file_name) { - iwl_mvm_read_external_nvm(mvm); + iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, + mvm->nvm_sections); iwl_mvm_load_nvm_to_nic(mvm); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2411cc91f833..6a4ba160c59e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -92,7 +92,7 @@ #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" -#include "fw/debugfs.h" +#include "iwl-nvm-parse.h" #include @@ -508,18 +508,6 @@ enum iwl_mvm_sched_scan_pass_all_states { SCHED_SCAN_PASS_ALL_FOUND, }; -/** - * struct iwl_nvm_section - describes an NVM section in memory. - * - * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, - * and saved for later use by the driver. Not all NVM sections are saved - * this way, only the needed ones. - */ -struct iwl_nvm_section { - u16 length; - const u8 *data; -}; - /** * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure * @ct_kill_exit: worker to exit thermal kill @@ -1511,7 +1499,6 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ int iwl_nvm_init(struct iwl_mvm *mvm); int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); -int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm); static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 5bfe5306524c..cf48517944ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -76,9 +78,7 @@ #include "fw/acpi.h" /* Default NVM size to read */ -#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) -#define IWL_MAX_NVM_SECTION_SIZE 0x1b58 -#define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc +#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024) #define NVM_WRITE_OPCODE 1 #define NVM_READ_OPCODE 0 @@ -229,19 +229,6 @@ static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section, return 0; } -static void iwl_mvm_nvm_fixups(struct iwl_mvm *mvm, unsigned int section, - u8 *data, unsigned int len) -{ -#define IWL_4165_DEVICE_ID 0x5501 -#define NVM_SKU_CAP_MIMO_DISABLE BIT(5) - - if (section == NVM_SECTION_TYPE_PHY_SKU && - mvm->trans->hw_id == IWL_4165_DEVICE_ID && data && len >= 5 && - (data[4] & NVM_SKU_CAP_MIMO_DISABLE)) - /* OTP 0x52 bug work around: it's a 1x1 device */ - data[3] = ANT_B | (ANT_B << 4); -} - /* * Reads an NVM section completely. * NICs prior to 7000 family doesn't have a real NVM, but just read @@ -282,7 +269,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, offset += ret; } - iwl_mvm_nvm_fixups(mvm, section, data, offset); + iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset); IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM section %d read completed\n", section); @@ -355,184 +342,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) lar_enabled); } -#define MAX_NVM_FILE_LEN 16384 - -/* - * Reads external NVM from a file into mvm->nvm_sections - * - * HOW TO CREATE THE NVM FILE FORMAT: - * ------------------------------ - * 1. create hex file, format: - * 3800 -> header - * 0000 -> header - * 5a40 -> data - * - * rev - 6 bit (word1) - * len - 10 bit (word1) - * id - 4 bit (word2) - * rsv - 12 bit (word2) - * - * 2. flip 8bits with 8 bits per line to get the right NVM file format - * - * 3. create binary file from the hex file - * - * 4. save as "iNVM_xxx.bin" under /lib/firmware - */ -int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) -{ - int ret, section_size; - u16 section_id; - const struct firmware *fw_entry; - const struct { - __le16 word1; - __le16 word2; - u8 data[]; - } *file_sec; - const u8 *eof; - u8 *temp; - int max_section_size; - const __le32 *dword_buff; - -#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) -#define NVM_WORD2_ID(x) (x >> 12) -#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8)) -#define EXT_NVM_WORD1_ID(x) ((x) >> 4) -#define NVM_HEADER_0 (0x2A504C54) -#define NVM_HEADER_1 (0x4E564D2A) -#define NVM_HEADER_SIZE (4 * sizeof(u32)) - - IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); - - /* Maximal size depends on NVM version */ - if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) - max_section_size = IWL_MAX_NVM_SECTION_SIZE; - else - max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; - - /* - * Obtain NVM image via request_firmware. 
Since we already used - * request_firmware_nowait() for the firmware binary load and only - * get here after that we assume the NVM request can be satisfied - * synchronously. - */ - ret = request_firmware(&fw_entry, mvm->nvm_file_name, - mvm->trans->dev); - if (ret) { - IWL_ERR(mvm, "ERROR: %s isn't available %d\n", - mvm->nvm_file_name, ret); - return ret; - } - - IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n", - mvm->nvm_file_name, fw_entry->size); - - if (fw_entry->size > MAX_NVM_FILE_LEN) { - IWL_ERR(mvm, "NVM file too large\n"); - ret = -EINVAL; - goto out; - } - - eof = fw_entry->data + fw_entry->size; - dword_buff = (__le32 *)fw_entry->data; - - /* some NVM file will contain a header. - * The header is identified by 2 dwords header as follow: - * dword[0] = 0x2A504C54 - * dword[1] = 0x4E564D2A - * - * This header must be skipped when providing the NVM data to the FW. - */ - if (fw_entry->size > NVM_HEADER_SIZE && - dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && - dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { - file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); - IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); - IWL_INFO(mvm, "NVM Manufacturing date %08X\n", - le32_to_cpu(dword_buff[3])); - - /* nvm file validation, dword_buff[2] holds the file version */ - if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 && - CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP && - le32_to_cpu(dword_buff[2]) < 0xE4A) { - ret = -EFAULT; - goto out; - } - } else { - file_sec = (void *)fw_entry->data; - } - - while (true) { - if (file_sec->data > eof) { - IWL_ERR(mvm, - "ERROR - NVM file too short for section header\n"); - ret = -EINVAL; - break; - } - - /* check for EOF marker */ - if (!file_sec->word1 && !file_sec->word2) { - ret = 0; - break; - } - - if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { - section_size = - 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); - section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); - } else { - section_size = 2 * EXT_NVM_WORD2_LEN( - le16_to_cpu(file_sec->word2)); - section_id = EXT_NVM_WORD1_ID( - le16_to_cpu(file_sec->word1)); - } - - if (section_size > max_section_size) { - IWL_ERR(mvm, "ERROR - section too large (%d)\n", - section_size); - ret = -EINVAL; - break; - } - - if (!section_size) { - IWL_ERR(mvm, "ERROR - section empty\n"); - ret = -EINVAL; - break; - } - - if (file_sec->data + section_size > eof) { - IWL_ERR(mvm, - "ERROR - NVM file too short for section (%d bytes)\n", - section_size); - ret = -EINVAL; - break; - } - - if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, - "Invalid NVM section ID %d\n", section_id)) { - ret = -EINVAL; - break; - } - - temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); - if (!temp) { - ret = -ENOMEM; - break; - } - - iwl_mvm_nvm_fixups(mvm, section_id, temp, section_size); - - kfree(mvm->nvm_sections[section_id].data); - mvm->nvm_sections[section_id].data = temp; - mvm->nvm_sections[section_id].length = section_size; - - /* advance to the next section */ - file_sec = (void *)(file_sec->data + section_size); - } -out: - release_firmware(fw_entry); - return ret; -} - /* Loads the NVM data stored in mvm->nvm_sections into the NIC */ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm) { @@ -585,7 +394,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm) break; } - iwl_mvm_nvm_fixups(mvm, section, temp, ret); + iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret); mvm->nvm_sections[section].data = temp; mvm->nvm_sections[section].length = ret; @@ -624,14 +433,17 @@ int iwl_nvm_init(struct 
iwl_mvm *mvm) /* Only if PNVM selected in the mod param - load external NVM */ if (mvm->nvm_file_name) { /* read External NVM file from the mod param */ - ret = iwl_mvm_read_external_nvm(mvm); + ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, + mvm->nvm_sections); if (ret) { mvm->nvm_file_name = nvm_file_C; if ((ret == -EFAULT || ret == -ENOENT) && mvm->nvm_file_name) { /* in case nvm file was failed try again */ - ret = iwl_mvm_read_external_nvm(mvm); + ret = iwl_read_external_nvm(mvm->trans, + mvm->nvm_file_name, + mvm->nvm_sections); if (ret) return ret; } else { -- cgit v1.2.3 From 8f66e064c9db418c6c3037aa2c217dc7b18f3276 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 17 Jan 2018 16:35:39 +0200 Subject: iwlwifi: mvm: use the new get_tid function This saves some typing and is overall more readable. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 7 +++---- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 11 ++++------- 2 files changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index f9cd7575b422..34791628cfb3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -112,7 +112,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, return -1; if (ieee80211_is_data_qos(hdr->frame_control)) - tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr); else tid = 0; @@ -347,8 +347,7 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, if (ieee80211_is_data_qos(hdr->frame_control)) /* frame has qos control */ - tid = *ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr); else tid = IWL_MAX_TID_COUNT; @@ -628,7 +627,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; bool last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; - u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + u8 tid = ieee80211_get_tid(hdr); u8 sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; struct iwl_mvm_reorder_buf_entry *entries; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 90381348697c..d0916f2552e2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -767,7 +767,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, u16 snap_ip_tcp, pad; unsigned int dbg_max_amsdu_len; netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; - u8 *qc, tid, txf; + u8 tid, txf; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); @@ -790,8 +790,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); } - qc = ieee80211_get_qos_ctl(hdr); - tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr); if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; @@ -846,7 +845,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, */ num_subframes = (max_amsdu_len + pad) / (subf_len + pad); if (num_subframes > 1) - *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; + *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; tcp_payload_len = skb_tail_pointer(skb) - 
skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; @@ -1007,9 +1006,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, * assignment of MGMT TID */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { - u8 *qc = NULL; - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr); if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) goto drop_unlock_sta; -- cgit v1.2.3 From 3506832b098850a8aafee19512e79c33debab380 Mon Sep 17 00:00:00 2001 From: Shaul Triebitz Date: Sun, 28 Jan 2018 18:27:52 +0200 Subject: iwlwifi: pcie: gen2: fix race in cmd fifo write ptr Avoid a race where two (or more) commands get the same index: 1. T1 calls enqueue_hcmd and the local TFD index is assigned to txq->write_ptr; 2. Context switch 'before incrementing txq->write_ptr'; 3. T2 calls enqueue_hcmd and the local TFD index is assigned to txq->write_ptr; 4. Now the index is set to the same value for both commands of T1 and T2. To prevent this from happening, set the local TFD index inside the critical section (the index is set by global txq write pointer). Signed-off-by: Shaul Triebitz Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index a235dda1f70b..48890a1c825f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -569,15 +569,13 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, unsigned long flags; void *dup_buf = NULL; dma_addr_t phys_addr; - int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + int i, cmd_pos, idx; u16 copy_size, cmd_size, tb0_size; bool had_nocopy = false; u8 group_id = iwl_cmd_groupid(cmd->id); const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); - - memset(tfd, 0, sizeof(*tfd)); + struct iwl_tfh_tfd *tfd; copy_size = sizeof(struct iwl_cmd_header_wide); cmd_size = sizeof(struct iwl_cmd_header_wide); @@ -648,6 +646,10 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, spin_lock_bh(&txq->lock); + idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); + memset(tfd, 0, sizeof(*tfd)); + if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { spin_unlock_bh(&txq->lock); -- cgit v1.2.3 From 4883145a8e17fd0c232a80dbc212eaebf57b061d Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 29 Jan 2018 10:00:05 +0200 Subject: iwlwifi: mvm: set the MFP flag for keys that are used by MFP stations 22000 devices rely on this flag to install the key to the right queues. For earlier devices we didn't have a key / queue mapping and the key was sent along with the Tx command for each Tx hence the problem didn't arise. 
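The essence of the change is that the caller captures sta->mfp once and the
key-install helper folds it into the command's key flags. A minimal
stand-alone sketch of that shape (placeholder bit values and structures,
not the real iwlwifi definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STA_KEY_MULTICAST (1u << 0)	/* placeholder bit positions */
#define STA_KEY_MFP       (1u << 1)

struct sta {
	bool mfp;			/* station negotiated 802.11w/MFP */
};

/* build the flags that travel with the key-install command */
static uint16_t build_key_flags(bool mcast, bool mfp)
{
	uint16_t flags = 0;

	if (mcast)
		flags |= STA_KEY_MULTICAST;
	if (mfp)
		flags |= STA_KEY_MFP;	/* lets firmware pick MFP-aware queues */
	return flags;
}

int main(void)
{
	struct sta sta = { .mfp = true };

	printf("key flags: 0x%04x\n", build_key_flags(false, sta.mfp));
	return 0;
}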
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 80067eb9ea05..4f9d3e13d7e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -2887,7 +2888,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u32 sta_id, struct ieee80211_key_conf *key, bool mcast, u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, - u8 key_offset) + u8 key_offset, bool mfp) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; @@ -2960,6 +2961,8 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); + if (mfp) + key_flags |= cpu_to_le16(STA_KEY_MFP); u.cmd.common.key_offset = key_offset; u.cmd.common.key_flags = key_flags; @@ -3101,11 +3104,13 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_key_seq seq; u16 p1k[5]; u32 sta_id; + bool mfp = false; if (sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); sta_id = mvm_sta->sta_id; + mfp = sta->mfp; } else if (vif->type == NL80211_IFTYPE_AP && !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -3127,7 +3132,8 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - seq.tkip.iv32, p1k, 0, key_offset); + seq.tkip.iv32, p1k, 0, key_offset, + mfp); break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_WEP40: @@ -3135,11 +3141,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - 0, NULL, 0, key_offset); + 0, NULL, 0, key_offset, mfp); break; default: ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - 0, NULL, 0, key_offset); + 0, NULL, 0, key_offset, mfp); } return ret; @@ -3366,6 +3372,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvm_sta; bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); + bool mfp = sta ? sta->mfp : false; rcu_read_lock(); @@ -3373,7 +3380,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, if (WARN_ON_ONCE(!mvm_sta)) goto unlock; iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, - iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx); + iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx, + mfp); unlock: rcu_read_unlock(); -- cgit v1.2.3 From 2c2b4bbc5d1f5c37e16d108f7a0c4e2a36c4f423 Mon Sep 17 00:00:00 2001 From: Naftali Goldstein Date: Mon, 15 Jan 2018 12:32:30 +0200 Subject: iwlwifi: mvm: update rs-fw API Update rs-fw API to match changes in FW. 
Specifically, the TLC_MNG_NOTIF_REQ_CMD command and TLC_MNG_AMSDU_ENABLE_NOTIF notification are removed, the A-MSDU related info is received from FW via the TLC_MNG_UPDATE_NOTIF, and the TLC_MNG_CONFIG_CMD uses version 2 of its data structure. Additionally, constify some arguments in a couple of functions. Signed-off-by: Naftali Goldstein Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/fw/api/datapath.h | 10 -- drivers/net/wireless/intel/iwlwifi/fw/api/rs.h | 168 +++++++-------------- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2 + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 3 - drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 143 +++++++----------- drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 2 - drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 4 +- 7 files changed, 114 insertions(+), 218 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index 184cee98c359..5f6e855006dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -87,16 +87,6 @@ enum iwl_data_path_subcmd_ids { */ TLC_MNG_CONFIG_CMD = 0xF, - /** - * @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd - */ - TLC_MNG_NOTIF_REQ_CMD = 0x10, - - /** - * @TLC_MNG_AMSDU_ENABLE_NOTIF: &struct iwl_tlc_amsdu_notif - */ - TLC_MNG_AMSDU_ENABLE_NOTIF = 0xF6, - /** * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 1a8f3154a1fe..21e13a315421 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -64,62 +66,38 @@ /** * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags - * @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support - * @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC - * @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER - * @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM */ enum iwl_tlc_mng_cfg_flags { - IWL_TLC_MNG_CFG_FLAGS_CCK_MSK = BIT(0), - IWL_TLC_MNG_CFG_FLAGS_DD_MSK = BIT(1), - IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(2), - IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(3), - IWL_TLC_MNG_CFG_FLAGS_BF_MSK = BIT(4), - IWL_TLC_MNG_CFG_FLAGS_DCM_MSK = BIT(5), + IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0), + IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1), }; /** * enum iwl_tlc_mng_cfg_cw - channel width options - * @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel - * @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel - * @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel - * @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel - * @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value + * @IWL_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel + * @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel + * @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel + * @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel + * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value */ enum iwl_tlc_mng_cfg_cw { - IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ, - IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ, - IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ, - IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ, - IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ, + IWL_TLC_MNG_CH_WIDTH_20MHZ, + IWL_TLC_MNG_CH_WIDTH_40MHZ, + IWL_TLC_MNG_CH_WIDTH_80MHZ, + IWL_TLC_MNG_CH_WIDTH_160MHZ, + IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ, }; /** * enum iwl_tlc_mng_cfg_chains - possible chains * @IWL_TLC_MNG_CHAIN_A_MSK: chain A * @IWL_TLC_MNG_CHAIN_B_MSK: chain B - * @IWL_TLC_MNG_CHAIN_C_MSK: chain C */ enum iwl_tlc_mng_cfg_chains { IWL_TLC_MNG_CHAIN_A_MSK = BIT(0), IWL_TLC_MNG_CHAIN_B_MSK = BIT(1), - IWL_TLC_MNG_CHAIN_C_MSK = BIT(2), -}; - -/** - * enum iwl_tlc_mng_cfg_gi - guard interval options - * @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ - * @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ - * @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ - * @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ - */ -enum iwl_tlc_mng_cfg_gi { - IWL_TLC_MNG_SGI_20MHZ_MSK = BIT(0), - IWL_TLC_MNG_SGI_40MHZ_MSK = BIT(1), - IWL_TLC_MNG_SGI_80MHZ_MSK = BIT(2), - IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3), }; /** @@ -145,25 +123,7 @@ enum iwl_tlc_mng_cfg_mode { }; /** - * enum iwl_tlc_mng_vht_he_types - VHT HE types - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT HT single user - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT HT single user extended - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT HT multiple users - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types - */ -enum iwl_tlc_mng_vht_he_types { - IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0, - IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT, - IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU, - IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED, - IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM = - IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED, - -}; - -/** - * enum iwl_tlc_mng_ht_rates - HT/VHT rates + * enum iwl_tlc_mng_ht_rates - HT/VHT/HE rates * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0 * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1 * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2 @@ -174,6 +134,8 @@ enum 
iwl_tlc_mng_vht_he_types { * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7 * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8 * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9 + * @IWL_TLC_MNG_HT_RATE_MCS10: index of MCS10 + * @IWL_TLC_MNG_HT_RATE_MCS11: index of MCS11 * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT */ enum iwl_tlc_mng_ht_rates { @@ -187,95 +149,73 @@ enum iwl_tlc_mng_ht_rates { IWL_TLC_MNG_HT_RATE_MCS7, IWL_TLC_MNG_HT_RATE_MCS8, IWL_TLC_MNG_HT_RATE_MCS9, - IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9, + IWL_TLC_MNG_HT_RATE_MCS10, + IWL_TLC_MNG_HT_RATE_MCS11, + IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11, }; /* Maximum supported tx antennas number */ -#define MAX_RS_ANT_NUM 3 +#define MAX_NSS 2 /** * struct tlc_config_cmd - TLC configuration * @sta_id: station id * @reserved1: reserved - * @max_supp_ch_width: channel width - * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags - * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains - * @max_supp_ss: valid values are 0-3, 0 - spatial streams are not supported - * @valid_vht_he_types: bitmap of &enum iwl_tlc_mng_vht_he_types - * @non_ht_supp_rates: bitmap of supported legacy rates - * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9 + * @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw * @mode: &enum iwl_tlc_mng_cfg_mode + * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains * @amsdu: TX amsdu is supported - * @he_supp_rates: bitmap of supported HE rates + * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags + * @non_ht_rates: bitmap of supported legacy rates + * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per + * pair (0 - 80mhz width and below, 1 - 160mhz). + * @max_mpdu_len: max MPDU length, in bytes * @sgi_ch_width_supp: bitmap of SGI support per channel width - * @he_gi_support: 11ax HE guard interval - * @max_ampdu_cnt: max AMPDU size (frames count) + * use BIT(@enum iwl_tlc_mng_cfg_cw) + * @reserved2: reserved */ struct iwl_tlc_config_cmd { u8 sta_id; u8 reserved1[3]; - u8 max_supp_ch_width; - u8 chains; - u8 max_supp_ss; - u8 valid_vht_he_types; - __le16 flags; - __le16 non_ht_supp_rates; - __le16 ht_supp_rates[MAX_RS_ANT_NUM]; + u8 max_ch_width; u8 mode; + u8 chains; u8 amsdu; - __le16 he_supp_rates; + __le16 flags; + __le16 non_ht_rates; + __le16 ht_rates[MAX_NSS][2]; + __le16 max_mpdu_len; u8 sgi_ch_width_supp; - u8 he_gi_support; - __le32 max_ampdu_cnt; -} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */ - -/** - * struct iwl_tlc_amsdu_notif - TLC AMSDU configuration - * @sta_id: station id - * @reserved: reserved - * @amsdu_size: Max AMSDU size, in bytes - * @amsdu_enabled: bitmap for per-TID AMSDU enablement - */ -struct iwl_tlc_amsdu_notif { - u8 sta_id; - u8 reserved[3]; - __le16 amsdu_size; - __le16 amsdu_enabled; -} __packed; /* TLC_MNG_AMSDU_ENABLE_NTFY_API_S_VER_1 */ - -#define IWL_TLC_NOTIF_INIT_RATE_POS 0 -#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS) -#define IWL_TLC_NOTIF_REQ_INTERVAL (500) + u8 reserved2[1]; +} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_2 */ /** - * struct iwl_tlc_notif_req_config_cmd - request notif on specific changes - * @sta_id: relevant station - * @reserved1: reserved - * @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\* - * @interval: minimum time between notifications from TLC to the driver (msec) - * @reserved2: reserved + * enum iwl_tlc_update_flags - updated fields + * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update + * @IWL_TLC_NOTIF_FLAG_AMSDU: umsdu parameters update */ -struct 
iwl_tlc_notif_req_config_cmd { - u8 sta_id; - u8 reserved1; - __le16 flags; - __le16 interval; - __le16 reserved2; -} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */ +enum iwl_tlc_update_flags { + IWL_TLC_NOTIF_FLAG_RATE = BIT(0), + IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1), +}; /** * struct iwl_tlc_update_notif - TLC notification from FW * @sta_id: station id * @reserved: reserved * @flags: bitmap of notifications reported - * @values: field per flag in struct iwl_tlc_notif_req_config_cmd + * @rate: current initial rate + * @amsdu_size: Max AMSDU size, in bytes + * @amsdu_enabled: bitmap for per-TID AMSDU enablement */ struct iwl_tlc_update_notif { u8 sta_id; - u8 reserved; - __le16 flags; - __le32 values[16]; -} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */ + u8 reserved[3]; + __le32 flags; + __le32 rate; + __le32 amsdu_size; + __le32 amsdu_enabled; +} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */ /** * enum iwl_tlc_debug_flags - debug options diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 687bed30c851..14e69e7a2287 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -151,6 +151,8 @@ enum iwl_nvm_type { #define ANT_AC (ANT_A | ANT_C) #define ANT_BC (ANT_B | ANT_C) #define ANT_ABC (ANT_A | ANT_B | ANT_C) +#define MAX_ANT_NUM 3 + static inline u8 num_of_ant(u8 mask) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 1ef4ac21f32a..de46e6258a5c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -312,8 +312,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF, iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC), - RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_AMSDU_ENABLE_NOTIF, - iwl_mvm_tlc_amsdu_notif, RX_HANDLER_SYNC), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -450,7 +448,6 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), - HCMD_NAME(TLC_MNG_AMSDU_ENABLE_NOTIF), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 4e818bce469b..b8b2b819e8e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,6 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -65,14 +67,14 @@ static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta) { switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_160: - return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ; + return IWL_TLC_MNG_CH_WIDTH_160MHZ; case IEEE80211_STA_RX_BW_80: - return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ; + return IWL_TLC_MNG_CH_WIDTH_80MHZ; case IEEE80211_STA_RX_BW_40: - return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ; + return IWL_TLC_MNG_CH_WIDTH_40MHZ; case IEEE80211_STA_RX_BW_20: default: - return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ; + return IWL_TLC_MNG_CH_WIDTH_20MHZ; } } @@ -85,7 +87,9 @@ static u8 rs_fw_set_active_chains(u8 chains) if (chains & ANT_B) fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK; if (chains & ANT_C) - fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK; + WARN(false, + "tlc offload doesn't support antenna C. chains: 0x%x\n", + chains); return fw_chains; } @@ -97,13 +101,13 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) u8 supp = 0; if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) - supp |= IWL_TLC_MNG_SGI_20MHZ_MSK; + supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ); if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) - supp |= IWL_TLC_MNG_SGI_40MHZ_MSK; + supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) - supp |= IWL_TLC_MNG_SGI_80MHZ_MSK; + supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) - supp |= IWL_TLC_MNG_SGI_160MHZ_MSK; + supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ); return supp; } @@ -114,9 +118,7 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm, struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; bool vht_ena = vht_cap && vht_cap->vht_supported; - u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK | - IWL_TLC_MNG_CFG_FLAGS_DCM_MSK | - IWL_TLC_MNG_CFG_FLAGS_DD_MSK; + u16 flags = 0; if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && @@ -129,16 +131,11 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm, (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && - (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && - (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) - flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK; - return flags; } static -int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, +int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap, int nss) { u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & @@ -160,15 +157,16 @@ int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, return 0; } -static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta, - struct ieee80211_sta_vht_cap *vht_cap, - struct iwl_tlc_config_cmd *cmd) +static void +rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, + const struct ieee80211_sta_vht_cap *vht_cap, + struct iwl_tlc_config_cmd *cmd) { u16 supp; int i, highest_mcs; for (i = 0; i < sta->rx_nss; i++) { - if (i == MAX_RS_ANT_NUM) + if (i == MAX_NSS) break; highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); @@ -179,7 +177,9 @@ static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta, if (sta->bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); - cmd->ht_supp_rates[i] = cpu_to_le16(supp); + cmd->ht_rates[i][0] = cpu_to_le16(supp); + if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + cmd->ht_rates[i][1] = 
cmd->ht_rates[i][0]; } } @@ -190,8 +190,8 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, int i; unsigned long tmp; unsigned long supp; /* must be unsigned long for for_each_set_bit */ - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; /* non HT rates */ supp = 0; @@ -199,99 +199,71 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value); - cmd->non_ht_supp_rates = cpu_to_le16(supp); + cmd->non_ht_rates = cpu_to_le16(supp); cmd->mode = IWL_TLC_MNG_MODE_NON_HT; - /* HT/VHT rates */ if (vht_cap && vht_cap->vht_supported) { cmd->mode = IWL_TLC_MNG_MODE_VHT; rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); } else if (ht_cap && ht_cap->ht_supported) { cmd->mode = IWL_TLC_MNG_MODE_HT; - cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); - cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); + cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); + cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } -static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id) -{ - u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0); - struct iwl_tlc_notif_req_config_cmd cfg_cmd = { - .sta_id = sta_id, - .flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK), - .interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL), - }; - int ret; - - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd); - if (ret) - IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret); -} - -void iwl_mvm_tlc_amsdu_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_tlc_amsdu_notif *notif; + struct iwl_tlc_update_notif *notif; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; - u16 size; - - notif = (void *)pkt->data; - - if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) - return; + struct iwl_lq_sta_rs_fw *lq_sta; + u32 flags; rcu_read_lock(); + notif = (void *)pkt->data; sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (IS_ERR_OR_NULL(sta)) { - rcu_read_unlock(); IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id); - return; + goto out; } mvmsta = iwl_mvm_sta_from_mac80211(sta); - size = min(le16_to_cpu(notif->amsdu_size), sta->max_amsdu_len); - mvmsta->amsdu_enabled = le16_to_cpu(notif->amsdu_enabled); - mvmsta->max_amsdu_len = size; - - IWL_DEBUG_RATE(mvm, - "AMSDU notification. 
AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", - le16_to_cpu(notif->amsdu_size), size, - mvmsta->amsdu_enabled); - - rcu_read_unlock(); -}; - -void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_tlc_update_notif *notif; - struct iwl_mvm_sta *mvmsta; - struct iwl_lq_sta_rs_fw *lq_sta; - - rcu_read_lock(); - - notif = (void *)pkt->data; - mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id); - if (!mvmsta) { IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id); goto out; } + flags = le32_to_cpu(notif->flags); + lq_sta = &mvmsta->lq_sta.rs_fw; - if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) { - lq_sta->last_rate_n_flags = - le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]); + if (flags & IWL_TLC_NOTIF_FLAG_RATE) { + lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n", lq_sta->last_rate_n_flags); } + + if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) { + u16 size = le32_to_cpu(notif->amsdu_size); + + if (WARN_ON(sta->max_amsdu_len < size)) + goto out; + + mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); + mvmsta->max_amsdu_len = size; + + IWL_DEBUG_RATE(mvm, + "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", + le32_to_cpu(notif->amsdu_size), size, + mvmsta->amsdu_enabled); + } out: rcu_read_unlock(); } @@ -306,11 +278,10 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband; struct iwl_tlc_config_cmd cfg_cmd = { .sta_id = mvmsta->sta_id, - .max_supp_ch_width = rs_fw_bw_from_sta_bw(sta), + .max_ch_width = rs_fw_bw_from_sta_bw(sta), .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), - .max_supp_ss = sta->rx_nss, - .max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize), + .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len), .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), .amsdu = iwl_mvm_is_csum_supported(mvm), }; @@ -327,8 +298,6 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd); if (ret) IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); - - rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id); } int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 736ac9642921..5e89141656c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -456,6 +456,4 @@ int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_tlc_amsdu_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); #endif /* __rs__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index e597bc193e5c..0497c7a44def 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -278,8 +278,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) u8 ind = last_idx; int i; - for (i = 0; i < MAX_RS_ANT_NUM; i++) { - ind = (ind + 1) % MAX_RS_ANT_NUM; + for (i = 0; i < MAX_ANT_NUM; i++) { + ind = (ind + 1) % MAX_ANT_NUM; if (valid & BIT(ind)) return ind; } 
-- cgit v1.2.3 From 4ae80f6c8d86c415527b8bfff7c85ebe6268fc9c Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Mon, 14 Aug 2017 13:06:48 +0300 Subject: iwlwifi: support api ver2 of NVM_GET_INFO resp NVM_GET_INFO API has changed to support indication of 11ax support. Signed-off-by: Liad Kaufman Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 42 ++++++++++++++-------- drivers/net/wireless/intel/iwlwifi/fw/nvm.c | 14 +++++--- .../net/wireless/intel/iwlwifi/iwl-eeprom-parse.h | 1 + 3 files changed, 38 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 37c57bcbfb4a..8d6dc9189985 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -189,23 +189,37 @@ struct iwl_nvm_get_info_general { u8 reserved; } __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */ +/** + * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku + * @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled + * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled + * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled + * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled + * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled + * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled + * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled + * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled + * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled + */ +enum iwl_nvm_mac_sku_flags { + NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = BIT(0), + NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1), + NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2), + NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3), + NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4), + NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5), + NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8), + NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = BIT(14), + NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = BIT(15), +}; + /** * struct iwl_nvm_get_info_sku - mac information - * @enable_24g: band 2.4G enabled - * @enable_5g: band 5G enabled - * @enable_11n: 11n enabled - * @enable_11ac: 11ac enabled - * @mimo_disable: MIMO enabled - * @ext_crypto: Extended crypto enabled + * @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags */ struct iwl_nvm_get_info_sku { - __le32 enable_24g; - __le32 enable_5g; - __le32 enable_11n; - __le32 enable_11ac; - __le32 mimo_disable; - __le32 ext_crypto; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */ + __le32 mac_sku_flags; +} __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */ /** * struct iwl_nvm_get_info_phy - phy information @@ -243,7 +257,7 @@ struct iwl_nvm_get_info_rsp { struct iwl_nvm_get_info_sku mac_sku; struct iwl_nvm_get_info_phy phy_sku; struct iwl_nvm_get_info_regulatory regulatory; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */ +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */ /** * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c index bd2e1fb43f5a..ff44d9031e1a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c @@ -86,6 +86,7 @@ struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) bool lar_fw_supported = !iwlwifi_mod_params.lar_disable 
&& fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + u32 mac_flags; ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) @@ -125,16 +126,19 @@ struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); /* Initialize MAC sku data */ + mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags); nvm->sku_cap_11ac_enable = - le32_to_cpu(rsp->mac_sku.enable_11ac); + !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); nvm->sku_cap_11n_enable = - le32_to_cpu(rsp->mac_sku.enable_11n); + !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); + nvm->sku_cap_11ax_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); nvm->sku_cap_band_24GHz_enable = - le32_to_cpu(rsp->mac_sku.enable_24g); + !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); nvm->sku_cap_band_52GHz_enable = - le32_to_cpu(rsp->mac_sku.enable_5g); + !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); nvm->sku_cap_mimo_disabled = - le32_to_cpu(rsp->mac_sku.mimo_disable); + !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); /* Initialize PHY sku data */ nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index b33888991b94..16bea75e82f1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -85,6 +85,7 @@ struct iwl_nvm_data { bool sku_cap_band_52GHz_enable; bool sku_cap_11n_enable; bool sku_cap_11ac_enable; + bool sku_cap_11ax_enable; bool sku_cap_amt_enable; bool sku_cap_ipan_enable; bool sku_cap_mimo_disabled; -- cgit v1.2.3 From 4b82455ca51ed23cf0fd6a24ba72a40fd4794b82 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 15 Aug 2017 20:26:09 +0300 Subject: iwlwifi: use flags to denote modifiers for the channel maps Instead of having a boolean for each modifier we need to handle in the channel maps, create a bitmask with flags that denote each modification. 
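A self-contained sketch of the boolean-to-bitmask conversion described
above; the flag names echo the new IWL_NVM_SBANDS_FLAGS_* enum, everything
else is simplified so the example compiles on its own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum nvm_sbands_flags {
	NVM_SBANDS_FLAGS_LAR             = BIT(0),
	NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1),
};

/* old shape: one bool parameter per modifier */
static void init_sbands_old(bool lar_supported, bool no_wide_in_5ghz)
{
	printf("old: lar=%d no_wide=%d\n", lar_supported, no_wide_in_5ghz);
}

/* new shape: a single flags word, extensible without touching the signature */
static void init_sbands_new(uint32_t flags)
{
	printf("new: lar=%d no_wide=%d\n",
	       !!(flags & NVM_SBANDS_FLAGS_LAR),
	       !!(flags & NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ));
}

int main(void)
{
	uint32_t flags = 0;

	init_sbands_old(true, false);

	flags |= NVM_SBANDS_FLAGS_LAR;
	init_sbands_new(flags);
	return 0;
}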
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/nvm.c | 10 +++++---- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 25 ++++++++++++++-------- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 14 ++++++++++-- 3 files changed, 34 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c index ff44d9031e1a..beefb89194a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c @@ -87,6 +87,7 @@ struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); u32 mac_flags; + u32 sbands_flags = 0; ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) @@ -144,15 +145,16 @@ struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); - /* Initialize regulatory data */ - nvm->lar_enabled = - le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported; + if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) { + nvm->lar_enabled = true; + sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; + } iwl_init_sbands(trans->dev, trans->cfg, nvm, rsp->regulatory.channel_profile, nvm->valid_tx_ant & fwrt->fw->valid_tx_ant, nvm->valid_rx_ant & fwrt->fw->valid_rx_ant, - nvm->lar_enabled, false); + sbands_flags); iwl_free_resp(&hcmd); return nvm; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 3437ed480e31..735b211364d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -295,7 +295,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 * const nvm_ch_flags, - bool lar_supported, bool no_wide_in_5ghz) + u32 sbands_flags) { int ch_idx; int n_channels = 0; @@ -323,7 +323,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, continue; /* workaround to disable wide channels in 5GHz */ - if (no_wide_in_5ghz && is_5ghz) { + if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) && + is_5ghz) { ch_flags &= ~(NVM_CHANNEL_40MHZ | NVM_CHANNEL_80MHZ | NVM_CHANNEL_160MHZ); @@ -332,7 +333,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, if (ch_flags & NVM_CHANNEL_160MHZ) data->vht160_supported = true; - if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) { + if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) && + !(ch_flags & NVM_CHANNEL_VALID)) { /* * Channels might become valid later if lar is * supported, hence we still want to add them to @@ -362,7 +364,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, channel->max_power = IWL_DEFAULT_MAX_TX_POWER; /* don't put limitations in case we're using LAR */ - if (!lar_supported) + if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR)) channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx], ch_idx, is_5ghz, ch_flags, cfg); @@ -460,15 +462,14 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, bool lar_supported, - bool no_wide_in_5ghz) + u8 tx_chains, u8 rx_chains, u32 
sbands_flags) { int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, - lar_supported, no_wide_in_5ghz); + sbands_flags); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; @@ -716,8 +717,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct device *dev = trans->dev; struct iwl_nvm_data *data; bool lar_enabled; - bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw); u32 sku, radio_cfg; + u32 sbands_flags = 0; u16 lar_config; const __le16 *ch_section; @@ -790,8 +791,14 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, return NULL; } + if (lar_fw_supported && lar_enabled) + sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; + + if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw)) + sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ; + iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains, - lar_fw_supported && lar_enabled, no_wide_in_5ghz); + sbands_flags); data->calib_version = 255; return data; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 6b108cae61b8..868baa508a16 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -69,6 +69,17 @@ #include #include "iwl-eeprom-parse.h" +/** + * enum iwl_nvm_sbands_flags - modification flags for the channel profiles + * + * @IWL_NVM_SBANDS_FLAGS_LAR: LAR is enabled + * @IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ: disallow 40, 80 and 160MHz on 5GHz + */ +enum iwl_nvm_sbands_flags { + IWL_NVM_SBANDS_FLAGS_LAR = BIT(0), + IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1), +}; + /** * iwl_parse_nvm_data - parse NVM data and return values * @@ -95,8 +106,7 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans, */ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, bool lar_supported, - bool no_wide_in_5ghz); + u8 tx_chains, u8 rx_chains, u32 sbands_flags); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW -- cgit v1.2.3 From 28e9c00fe1f0b1ff811d23dcffbf632b85f33797 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 17 Aug 2017 19:02:04 +0300 Subject: iwlwifi: remove upper case letters in sku_capa_band_*_enable The sku_capa_band_24GHz_enable and sku_capa_band_52GHz_enable symbols cause checkpatch to complain whenever we use them. To prevent this, convert them to all lower case. 
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/dvm/main.c | 8 ++++---- drivers/net/wireless/intel/iwlwifi/fw/nvm.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h | 4 ++-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 6 +++--- 5 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index e68254e12764..030482b357a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1200,16 +1200,16 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) return -EINVAL; } - if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable && - !data->sku_cap_band_52GHz_enable) { + if (!data->sku_cap_11n_enable && !data->sku_cap_band_24ghz_enable && + !data->sku_cap_band_52ghz_enable) { IWL_ERR(priv, "Invalid device sku\n"); return -EINVAL; } IWL_DEBUG_INFO(priv, "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n", - data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled", - data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled", + data->sku_cap_band_24ghz_enable ? "" : "NOT", "enabled", + data->sku_cap_band_52ghz_enable ? "" : "NOT", "enabled", data->sku_cap_11n_enable ? "" : "NOT", "enabled"); priv->hw_params.tx_chains_num = diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c index beefb89194a9..b8d88df27b18 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c @@ -134,9 +134,9 @@ struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); nvm->sku_cap_11ax_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); - nvm->sku_cap_band_24GHz_enable = + nvm->sku_cap_band_24ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); - nvm->sku_cap_band_52GHz_enable = + nvm->sku_cap_band_52ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index 3199d345b427..777f5df8a0c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -899,8 +899,8 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, EEPROM_SKU_CAP); data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE; data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE; - data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ; - data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ; + data->sku_cap_band_24ghz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ; + data->sku_cap_band_52ghz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ; data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE; if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) data->sku_cap_11n_enable = false; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index 16bea75e82f1..8be50ed12300 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -81,8 +81,8 @@ struct iwl_nvm_data { __le16 kelvin_voltage; __le16 xtal_calib[2]; - bool sku_cap_band_24GHz_enable; - 
bool sku_cap_band_52GHz_enable; + bool sku_cap_band_24ghz_enable; + bool sku_cap_band_52ghz_enable; bool sku_cap_11n_enable; bool sku_cap_11ac_enable; bool sku_cap_11ax_enable; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 735b211364d2..a6246dc9f025 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -319,7 +319,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); - if (is_5ghz && !data->sku_cap_band_52GHz_enable) + if (is_5ghz && !data->sku_cap_band_52ghz_enable) continue; /* workaround to disable wide channels in 5GHz */ @@ -745,8 +745,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, rx_chains &= data->valid_rx_ant; sku = iwl_get_sku(cfg, nvm_sw, phy_sku); - data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; - data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; + data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; + data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) data->sku_cap_11n_enable = false; -- cgit v1.2.3 From 4c625c564ba2cf92fdd9cb7387d30be04139f77b Mon Sep 17 00:00:00 2001 From: Shaul Triebitz Date: Wed, 24 Jan 2018 16:12:49 +0200 Subject: iwlwifi: get rid of fw/nvm.c There's already an opmode common file for nvm iwl-nvm-parse.c Move the content of fw/nvm.c to iwl-nvm-parse.c and delete fw/nvm.c. Signed-off-by: Shaul Triebitz Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/fw/nvm.c | 168 --------------------- drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 1 - drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 111 +++++++++++++- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 21 +-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 2 +- 6 files changed, 114 insertions(+), 191 deletions(-) delete mode 100644 drivers/net/wireless/intel/iwlwifi/fw/nvm.c (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index e6205eae51fd..4d08d78c6b71 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -13,7 +13,7 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o -iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o +iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o iwlwifi-$(CONFIG_ACPI) += fw/acpi.o iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c deleted file mode 100644 index b8d88df27b18..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c +++ /dev/null @@ -1,168 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#include "iwl-drv.h" -#include "runtime.h" -#include "fw/api/nvm-reg.h" -#include "fw/api/commands.h" -#include "iwl-nvm-parse.h" - -struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) -{ - struct iwl_nvm_get_info cmd = {}; - struct iwl_nvm_get_info_rsp *rsp; - struct iwl_trans *trans = fwrt->trans; - struct iwl_nvm_data *nvm; - struct iwl_host_cmd hcmd = { - .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, - .data = { &cmd, }, - .len = { sizeof(cmd) }, - .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) - }; - int ret; - bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&fwrt->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - u32 mac_flags; - u32 sbands_flags = 0; - - ret = iwl_trans_send_cmd(trans, &hcmd); - if (ret) - return ERR_PTR(ret); - - if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp), - "Invalid payload len in NVM response from FW %d", - iwl_rx_packet_payload_len(hcmd.resp_pkt))) { - ret = -EINVAL; - goto out; - } - - rsp = (void *)hcmd.resp_pkt->data; - if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) - IWL_INFO(fwrt, "OTP is empty\n"); - - nvm = kzalloc(sizeof(*nvm) + - sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, - GFP_KERNEL); - if (!nvm) { - ret = -ENOMEM; - goto out; - } - - iwl_set_hw_address_from_csr(trans, nvm); - /* TODO: if platform NVM has MAC address - override it here */ - - if (!is_valid_ether_addr(nvm->hw_addr)) { - IWL_ERR(fwrt, "no valid mac address was found\n"); - ret = -EINVAL; - goto err_free; - } - - IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr); - - /* Initialize general data */ - nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); - - /* Initialize MAC sku data */ - mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags); - nvm->sku_cap_11ac_enable = - !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); - nvm->sku_cap_11n_enable = - !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); - nvm->sku_cap_11ax_enable = - !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); - nvm->sku_cap_band_24ghz_enable = - !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); - nvm->sku_cap_band_52ghz_enable = - !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); - nvm->sku_cap_mimo_disabled = - !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); - - /* Initialize PHY sku data */ - nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); - nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); - - if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) { - nvm->lar_enabled = true; - sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; - } - - iwl_init_sbands(trans->dev, trans->cfg, nvm, - rsp->regulatory.channel_profile, - nvm->valid_tx_ant & fwrt->fw->valid_tx_ant, - nvm->valid_rx_ant & fwrt->fw->valid_rx_ant, - sbands_flags); - - iwl_free_resp(&hcmd); - return nvm; - -err_free: - kfree(nvm); -out: - iwl_free_resp(&hcmd); - return ERR_PTR(ret); -} -IWL_EXPORT_SYMBOL(iwl_fw_get_nvm); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 3fb940ebd74a..d8db1dd100b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -170,6 +170,5 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, struct iwl_rx_cmd_buffer *rxb); -struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt); #endif /* 
__iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index a6246dc9f025..6d33c14579d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -80,6 +80,9 @@ #include "iwl-csr.h" #include "fw/acpi.h" #include "fw/api/nvm-reg.h" +#include "fw/api/commands.h" +#include "fw/api/cmdhdr.h" +#include "fw/img.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { @@ -460,9 +463,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; } -void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, u32 sbands_flags) +static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + const __le16 *nvm_ch_flags, u8 tx_chains, + u8 rx_chains, u32 sbands_flags) { int n_channels; int n_used = 0; @@ -495,7 +499,6 @@ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); } -IWL_EXPORT_SYMBOL(iwl_init_sbands); static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) @@ -573,8 +576,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) dest[5] = hw_addr[0]; } -void iwl_set_hw_address_from_csr(struct iwl_trans *trans, - struct iwl_nvm_data *data) +static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, + struct iwl_nvm_data *data) { __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP)); __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP)); @@ -592,7 +595,6 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans, iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); } -IWL_EXPORT_SYMBOL(iwl_set_hw_address_from_csr); static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, @@ -1145,3 +1147,98 @@ out: return ret; } IWL_EXPORT_SYMBOL(iwl_read_external_nvm); + +struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, + const struct iwl_fw *fw) +{ + struct iwl_nvm_get_info cmd = {}; + struct iwl_nvm_get_info_rsp *rsp; + struct iwl_nvm_data *nvm; + struct iwl_host_cmd hcmd = { + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &cmd, }, + .len = { sizeof(cmd) }, + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) + }; + int ret; + bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && + fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + u32 mac_flags; + u32 sbands_flags = 0; + + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) + return ERR_PTR(ret); + + if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp), + "Invalid payload len in NVM response from FW %d", + iwl_rx_packet_payload_len(hcmd.resp_pkt))) { + ret = -EINVAL; + goto out; + } + + rsp = (void *)hcmd.resp_pkt->data; + if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) + IWL_INFO(trans, "OTP is empty\n"); + + nvm = kzalloc(sizeof(*nvm) + + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, + GFP_KERNEL); + if (!nvm) { + ret = -ENOMEM; + goto out; + } + + iwl_set_hw_address_from_csr(trans, nvm); + /* TODO: if platform NVM has MAC address - override it here */ + + if (!is_valid_ether_addr(nvm->hw_addr)) { + IWL_ERR(trans, "no valid mac address was found\n"); + ret = -EINVAL; + goto err_free; + } + + 
IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr); + + /* Initialize general data */ + nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); + + /* Initialize MAC sku data */ + mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags); + nvm->sku_cap_11ac_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); + nvm->sku_cap_11n_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); + nvm->sku_cap_band_24ghz_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); + nvm->sku_cap_band_52ghz_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); + nvm->sku_cap_mimo_disabled = + !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); + + /* Initialize PHY sku data */ + nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); + nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); + + if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) { + nvm->lar_enabled = true; + sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; + } + + iwl_init_sbands(trans->dev, trans->cfg, nvm, + rsp->regulatory.channel_profile, + nvm->valid_tx_ant & fw->valid_tx_ant, + nvm->valid_rx_ant & fw->valid_rx_ant, + sbands_flags); + + iwl_free_resp(&hcmd); + return nvm; + +err_free: + kfree(nvm); +out: + iwl_free_resp(&hcmd); + return ERR_PTR(ret); +} +IWL_EXPORT_SYMBOL(iwl_get_nvm); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 868baa508a16..60c7586f8342 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -95,19 +95,6 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported); -/** - * iwl_set_hw_address_from_csr - sets HW address for 9000 devices and on - */ -void iwl_set_hw_address_from_csr(struct iwl_trans *trans, - struct iwl_nvm_data *data); - -/** - * iwl_init_sbands - parse and set all channel profiles - */ -void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, u32 sbands_flags); - /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW * @@ -142,4 +129,12 @@ int iwl_read_external_nvm(struct iwl_trans *trans, void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, unsigned int len); +/** + * iwl_get_nvm - retrieve NVM data from firmware + * + * Allocates a new iwl_nvm_data structure, fills it with + * NVM data, and returns it to caller. 
+ */ +struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, + const struct iwl_fw *fw); #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index d5a612add59f..866c91c923be 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -415,7 +415,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) /* Read the NVM only at driver load time, no need to do this twice */ if (!IWL_MVM_PARSE_NVM && read_nvm) { - mvm->nvm_data = iwl_fw_get_nvm(&mvm->fwrt); + mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw); if (IS_ERR(mvm->nvm_data)) { ret = PTR_ERR(mvm->nvm_data); mvm->nvm_data = NULL; -- cgit v1.2.3 From bd8f3fc613919b50038c949f80b3f350a166293e Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Wed, 17 Jan 2018 15:25:28 +0200 Subject: iwlwifi: mvm: support 22000 HW opening agg before traffic When trying to open aggregations on 22000 HW before traffic had actually passed, the driver will discover it is missing a queue to aggregate on. In such a case - allocate a queue. Signed-off-by: Liad Kaufman Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 4f9d3e13d7e5..4ddd2c427b83 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -36,6 +36,7 @@ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -2467,6 +2468,15 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, lockdep_assert_held(&mvm->mutex); + if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && + iwl_mvm_has_new_tx_api(mvm)) { + u8 ac = tid_to_mac80211_ac[tid]; + + ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); + if (ret) + return ret; + } + spin_lock_bh(&mvmsta->lock); /* possible race condition - we entered D0i3 while starting agg */ -- cgit v1.2.3 From 0263ea5cddedd84c111b55d33a8ec94740d1c8d3 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 02:00:47 +0200 Subject: net: phy: mdio-boardinfo: Allow recursive mdiobus_register() mdiobus_register will search for any mdiobus board info registered for the bus being registered. If found, it will probe devices on the bus. That device, if for example it is an ethernet switch, may then try to register an mdio bus. Thus we need to allow recursive calls to mdiobus_register. Holding the mdio_board_lock will cause a deadlock during this recursion. Release the lock and use list_for_each_entry_safe. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-boardinfo.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c index 1861f387820d..863496fa5d13 100644 --- a/drivers/net/phy/mdio-boardinfo.c +++ b/drivers/net/phy/mdio-boardinfo.c @@ -30,17 +30,20 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus, struct mdio_board_info *bi)) { struct mdio_board_entry *be; + struct mdio_board_entry *tmp; struct mdio_board_info *bi; int ret; mutex_lock(&mdio_board_lock); - list_for_each_entry(be, &mdio_board_list, list) { + list_for_each_entry_safe(be, tmp, &mdio_board_list, list) { bi = &be->board_info; if (strcmp(bus->id, bi->bus_id)) continue; + mutex_unlock(&mdio_board_lock); ret = cb(bus, bi); + mutex_lock(&mdio_board_lock); if (ret) continue; -- cgit v1.2.3 From 6b9227d666f2efe0f8ed234827bb1abdf63f9501 Mon Sep 17 00:00:00 2001 From: Kunihiko Hayashi Date: Thu, 19 Apr 2018 16:24:53 +0900 Subject: net: ethernet: ave: add multiple clocks and resets support as required property When the link is becoming up for Pro4 SoC, the kernel is stalled due to some missing clocks and resets. The AVE block for Pro4 is connected to the GIO bus in the SoC. Without its clock/reset, the access to the AVE register makes the system stall. In the same way, another MAC clock for Giga-bit Connection and the PHY clock are also required for Pro4 to activate the Giga-bit feature and to recognize the PHY. To satisfy these requirements, this patch adds support for multiple clocks and resets, and adds the clock-names and reset-names to the binding because we need to distinguish clock/reset for the AVE main block and the others. Also, make the resets a required property. Currently, "reset is optional" relies on that the bootloader or firmware has deasserted the reset before booting the kernel. Drivers should work without such expectation. Fixes: 4c270b55a5af ("net: ethernet: socionext: add AVE ethernet driver") Suggested-by: Masahiro Yamada Signed-off-by: Kunihiko Hayashi Reviewed-by: Rob Herring Signed-off-by: David S. Miller --- .../bindings/net/socionext,uniphier-ave4.txt | 13 ++- drivers/net/ethernet/socionext/sni_ave.c | 108 ++++++++++++++++----- 2 files changed, 96 insertions(+), 25 deletions(-) (limited to 'drivers/net') diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt index 96398cc2982f..85e0c49548ed 100644 --- a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt +++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt @@ -17,9 +17,18 @@ Required properties: - phy-handle: Should point to the external phy device. See ethernet.txt file in the same directory. - clocks: A phandle to the clock for the MAC. + For Pro4 SoC, that is "socionext,uniphier-pro4-ave4", + another MAC clock, GIO bus clock and PHY clock are also required. + - clock-names: Should contain + - "ether", "ether-gb", "gio", "ether-phy" for Pro4 SoC + - "ether" for others + - resets: A phandle to the reset control for the MAC. For Pro4 SoC, + GIO bus reset is also required. + - reset-names: Should contain + - "ether", "gio" for Pro4 SoC + - "ether" for others Optional properties: - - resets: A phandle to the reset control for the MAC. - local-mac-address: See ethernet.txt in the same directory. 
Required subnode: @@ -34,7 +43,9 @@ Example: interrupts = <0 66 4>; phy-mode = "rgmii"; phy-handle = <ðphy>; + clock-names = "ether"; clocks = <&sys_clk 6>; + reset-names = "ether"; resets = <&sys_rst 6>; local-mac-address = [00 00 00 00 00 00]; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 0b3b7a460641..52940bdd4ad3 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -199,6 +199,9 @@ #define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit) +#define AVE_MAX_CLKS 4 +#define AVE_MAX_RSTS 2 + enum desc_id { AVE_DESCID_RX, AVE_DESCID_TX, @@ -227,6 +230,8 @@ struct ave_desc_info { struct ave_soc_data { bool is_desc_64bit; + const char *clock_names[AVE_MAX_CLKS]; + const char *reset_names[AVE_MAX_RSTS]; }; struct ave_stats { @@ -245,8 +250,10 @@ struct ave_private { int phy_id; unsigned int desc_size; u32 msg_enable; - struct clk *clk; - struct reset_control *rst; + int nclks; + struct clk *clk[AVE_MAX_CLKS]; + int nrsts; + struct reset_control *rst[AVE_MAX_RSTS]; phy_interface_t phy_mode; struct phy_device *phydev; struct mii_bus *mdio; @@ -1153,18 +1160,23 @@ static int ave_init(struct net_device *ndev) struct device_node *np = dev->of_node; struct device_node *mdio_np; struct phy_device *phydev; - int ret; + int nc, nr, ret; /* enable clk because of hw access until ndo_open */ - ret = clk_prepare_enable(priv->clk); - if (ret) { - dev_err(dev, "can't enable clock\n"); - return ret; + for (nc = 0; nc < priv->nclks; nc++) { + ret = clk_prepare_enable(priv->clk[nc]); + if (ret) { + dev_err(dev, "can't enable clock\n"); + goto out_clk_disable; + } } - ret = reset_control_deassert(priv->rst); - if (ret) { - dev_err(dev, "can't deassert reset\n"); - goto out_clk_disable; + + for (nr = 0; nr < priv->nrsts; nr++) { + ret = reset_control_deassert(priv->rst[nr]); + if (ret) { + dev_err(dev, "can't deassert reset\n"); + goto out_reset_assert; + } } ave_global_reset(ndev); @@ -1207,9 +1219,11 @@ static int ave_init(struct net_device *ndev) out_mdio_unregister: mdiobus_unregister(priv->mdio); out_reset_assert: - reset_control_assert(priv->rst); + while (--nr >= 0) + reset_control_assert(priv->rst[nr]); out_clk_disable: - clk_disable_unprepare(priv->clk); + while (--nc >= 0) + clk_disable_unprepare(priv->clk[nc]); return ret; } @@ -1217,13 +1231,16 @@ out_clk_disable: static void ave_uninit(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); + int i; phy_disconnect(priv->phydev); mdiobus_unregister(priv->mdio); /* disable clk because of hw access after ndo_stop */ - reset_control_assert(priv->rst); - clk_disable_unprepare(priv->clk); + for (i = 0; i < priv->nrsts; i++) + reset_control_assert(priv->rst[i]); + for (i = 0; i < priv->nclks; i++) + clk_disable_unprepare(priv->clk[i]); } static int ave_open(struct net_device *ndev) @@ -1527,8 +1544,9 @@ static int ave_probe(struct platform_device *pdev) struct resource *res; const void *mac_addr; void __iomem *base; + const char *name; + int i, irq, ret; u64 dma_mask; - int irq, ret; u32 ave_id; data = of_device_get_match_data(dev); @@ -1614,16 +1632,28 @@ static int ave_probe(struct platform_device *pdev) u64_stats_init(&priv->stats_tx.syncp); u64_stats_init(&priv->stats_rx.syncp); - priv->clk = devm_clk_get(dev, NULL); - if (IS_ERR(priv->clk)) { - ret = PTR_ERR(priv->clk); - goto out_free_netdev; + for (i = 0; i < AVE_MAX_CLKS; i++) { + name = priv->data->clock_names[i]; + if (!name) + break; + priv->clk[i] = devm_clk_get(dev, name); + if 
(IS_ERR(priv->clk[i])) { + ret = PTR_ERR(priv->clk[i]); + goto out_free_netdev; + } + priv->nclks++; } - priv->rst = devm_reset_control_get_optional_shared(dev, NULL); - if (IS_ERR(priv->rst)) { - ret = PTR_ERR(priv->rst); - goto out_free_netdev; + for (i = 0; i < AVE_MAX_RSTS; i++) { + name = priv->data->reset_names[i]; + if (!name) + break; + priv->rst[i] = devm_reset_control_get_shared(dev, name); + if (IS_ERR(priv->rst[i])) { + ret = PTR_ERR(priv->rst[i]); + goto out_free_netdev; + } + priv->nrsts++; } priv->mdio = devm_mdiobus_alloc(dev); @@ -1687,22 +1717,52 @@ static int ave_remove(struct platform_device *pdev) static const struct ave_soc_data ave_pro4_data = { .is_desc_64bit = false, + .clock_names = { + "gio", "ether", "ether-gb", "ether-phy", + }, + .reset_names = { + "gio", "ether", + }, }; static const struct ave_soc_data ave_pxs2_data = { .is_desc_64bit = false, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, }; static const struct ave_soc_data ave_ld11_data = { .is_desc_64bit = false, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, }; static const struct ave_soc_data ave_ld20_data = { .is_desc_64bit = true, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, }; static const struct ave_soc_data ave_pxs3_data = { .is_desc_64bit = false, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, }; static const struct of_device_id of_ave_match[] = { -- cgit v1.2.3 From 57878f2f4697a4f4ae53586fa7565ed1a3275a2c Mon Sep 17 00:00:00 2001 From: Kunihiko Hayashi Date: Thu, 19 Apr 2018 16:24:55 +0900 Subject: net: ethernet: ave: add support for phy-mode setting of system controller This patch adds support for specifying system controller that configures phy-mode setting. According to the DT property "phy-mode", it's necessary to configure the controller, which is used to choose the settings of the MAC suitable, for example, mdio pin connections, internal clocks, and so on. Supported phy-modes are SoC-dependent. The driver allows phy-mode to set "internal" if the SoC has a built-in PHY, and {"mii", "rmii", "rgmii"} if the SoC supports each mode. So we have to check whether the phy-mode is valid or not. This adds the following features for each SoC: - check whether the SoC supports the specified phy-mode - configure the controller accroding to phy-mode The DT property accepts one argument to distinguish them for multiple MAC instances. ethernet@65000000 { ... socionext,syscon-phy-mode = <&soc_glue 0>; }; ethernet@65200000 { ... socionext,syscon-phy-mode = <&soc_glue 1>; }; Signed-off-by: Kunihiko Hayashi Signed-off-by: Masahiro Yamada Signed-off-by: David S. 
Miller --- drivers/net/ethernet/socionext/Kconfig | 2 + drivers/net/ethernet/socionext/sni_ave.c | 150 ++++++++++++++++++++++++++++--- 2 files changed, 140 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig index 6bcfe27fc560..b80048ca82a0 100644 --- a/drivers/net/ethernet/socionext/Kconfig +++ b/drivers/net/ethernet/socionext/Kconfig @@ -14,6 +14,8 @@ if NET_VENDOR_SOCIONEXT config SNI_AVE tristate "Socionext AVE ethernet support" depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF + depends on HAS_IOMEM + select MFD_SYSCON select PHYLIB ---help--- Driver for gigabit ethernet MACs, called AVE, in the diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 52940bdd4ad3..f7ecceeb1e28 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -18,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -197,6 +199,11 @@ #define AVE_INTM_COUNT 20 #define AVE_FORCE_TXINTCNT 1 +/* SG */ +#define SG_ETPINMODE 0x540 +#define SG_ETPINMODE_EXTPHY BIT(1) /* for LD11 */ +#define SG_ETPINMODE_RMII(ins) BIT(ins) + #define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit) #define AVE_MAX_CLKS 4 @@ -228,12 +235,6 @@ struct ave_desc_info { struct ave_desc *desc; /* skb info related descriptor */ }; -struct ave_soc_data { - bool is_desc_64bit; - const char *clock_names[AVE_MAX_CLKS]; - const char *reset_names[AVE_MAX_RSTS]; -}; - struct ave_stats { struct u64_stats_sync syncp; u64 packets; @@ -257,6 +258,9 @@ struct ave_private { phy_interface_t phy_mode; struct phy_device *phydev; struct mii_bus *mdio; + struct regmap *regmap; + unsigned int pinmode_mask; + unsigned int pinmode_val; /* stats */ struct ave_stats stats_rx; @@ -279,6 +283,14 @@ struct ave_private { const struct ave_soc_data *data; }; +struct ave_soc_data { + bool is_desc_64bit; + const char *clock_names[AVE_MAX_CLKS]; + const char *reset_names[AVE_MAX_RSTS]; + int (*get_pinmode)(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg); +}; + static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry, int offset) { @@ -1179,6 +1191,11 @@ static int ave_init(struct net_device *ndev) } } + ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, + priv->pinmode_mask, priv->pinmode_val); + if (ret) + return ret; + ave_global_reset(ndev); mdio_np = of_get_child_by_name(np, "mdio"); @@ -1537,6 +1554,7 @@ static int ave_probe(struct platform_device *pdev) const struct ave_soc_data *data; struct device *dev = &pdev->dev; char buf[ETHTOOL_FWVERS_LEN]; + struct of_phandle_args args; phy_interface_t phy_mode; struct ave_private *priv; struct net_device *ndev; @@ -1559,12 +1577,6 @@ static int ave_probe(struct platform_device *pdev) dev_err(dev, "phy-mode not found\n"); return -EINVAL; } - if ((!phy_interface_mode_is_rgmii(phy_mode)) && - phy_mode != PHY_INTERFACE_MODE_RMII && - phy_mode != PHY_INTERFACE_MODE_MII) { - dev_err(dev, "phy-mode is invalid\n"); - return -EINVAL; - } irq = platform_get_irq(pdev, 0); if (irq < 0) { @@ -1656,6 +1668,26 @@ static int ave_probe(struct platform_device *pdev) priv->nrsts++; } + ret = of_parse_phandle_with_fixed_args(np, + "socionext,syscon-phy-mode", + 1, 0, &args); + if (ret) { + netdev_err(ndev, "can't get syscon-phy-mode property\n"); + goto out_free_netdev; + } + priv->regmap = syscon_node_to_regmap(args.np); + 
of_node_put(args.np); + if (IS_ERR(priv->regmap)) { + netdev_err(ndev, "can't map syscon-phy-mode\n"); + ret = PTR_ERR(priv->regmap); + goto out_free_netdev; + } + ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]); + if (ret) { + netdev_err(ndev, "invalid phy-mode setting\n"); + goto out_free_netdev; + } + priv->mdio = devm_mdiobus_alloc(dev); if (!priv->mdio) { ret = -ENOMEM; @@ -1715,6 +1747,95 @@ static int ave_remove(struct platform_device *pdev) return 0; } +static int ave_pro4_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 0) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_RMII(0); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_RMII(0); + break; + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_RGMII: + priv->pinmode_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ave_ld11_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 0) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_INTERNAL: + priv->pinmode_val = 0; + break; + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ave_ld20_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 0) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_RMII(0); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_RMII(0); + break; + case PHY_INTERFACE_MODE_RGMII: + priv->pinmode_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ave_pxs3_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 1) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_RMII(arg); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_RMII(arg); + break; + case PHY_INTERFACE_MODE_RGMII: + priv->pinmode_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + static const struct ave_soc_data ave_pro4_data = { .is_desc_64bit = false, .clock_names = { @@ -1723,6 +1844,7 @@ static const struct ave_soc_data ave_pro4_data = { .reset_names = { "gio", "ether", }, + .get_pinmode = ave_pro4_get_pinmode, }; static const struct ave_soc_data ave_pxs2_data = { @@ -1733,6 +1855,7 @@ static const struct ave_soc_data ave_pxs2_data = { .reset_names = { "ether", }, + .get_pinmode = ave_pro4_get_pinmode, }; static const struct ave_soc_data ave_ld11_data = { @@ -1743,6 +1866,7 @@ static const struct ave_soc_data ave_ld11_data = { .reset_names = { "ether", }, + .get_pinmode = ave_ld11_get_pinmode, }; static const struct ave_soc_data ave_ld20_data = { @@ -1753,6 +1877,7 @@ static const struct ave_soc_data ave_ld20_data = { .reset_names = { "ether", }, + .get_pinmode = ave_ld20_get_pinmode, }; static const struct ave_soc_data ave_pxs3_data = { @@ -1763,6 +1888,7 @@ static const struct ave_soc_data ave_pxs3_data = { .reset_names = { "ether", }, + .get_pinmode = ave_pxs3_get_pinmode, }; static const struct of_device_id of_ave_match[] = { -- cgit v1.2.3 From cea395ac868dee9104aa4fff640486cf4b3c464c Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Wed, 18 Apr 2018 23:18:28 -0700 Subject: liquidio: Added ndo_get_vf_stats support Added the ndo to gather VF statistics through the PF. Collect VF statistics via mailbox from VF. 
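For illustration, a minimal sketch of the ndo_get_vf_stats contract this patch wires up; the example_* names are hypothetical placeholders rather than liquidio symbols, and the real handler below additionally fetches the counters from the VF over the PF/VF mailbox before filling struct ifla_vf_stats:

	#include <linux/netdevice.h>
	#include <linux/if_link.h>
	#include <linux/string.h>

	/* hypothetical per-VF counter snapshot kept by a PF driver */
	struct example_vf_counters {
		u64 rx_packets, tx_packets;
		u64 rx_bytes, tx_bytes;
	};

	/* hypothetical helper that obtains the VF's counters (e.g. over a mailbox) */
	static int example_get_vf_counters(struct net_device *netdev, int vfidx,
					   struct example_vf_counters *c);

	static int example_ndo_get_vf_stats(struct net_device *netdev, int vfidx,
					    struct ifla_vf_stats *vf_stats)
	{
		struct example_vf_counters c;

		if (vfidx < 0)
			return -EINVAL;
		if (example_get_vf_counters(netdev, vfidx, &c))
			return -EIO;

		/* clear fields this sketch does not track (broadcast/multicast) */
		memset(vf_stats, 0, sizeof(*vf_stats));
		vf_stats->rx_packets = c.rx_packets;
		vf_stats->tx_packets = c.tx_packets;
		vf_stats->rx_bytes   = c.rx_bytes;
		vf_stats->tx_bytes   = c.tx_bytes;
		return 0;
	}

Once such an op is plugged into net_device_ops as .ndo_get_vf_stats, the per-VF counters should be visible from user space, e.g. via "ip -s link show" on the PF, assuming a kernel and iproute2 with IFLA_VF_STATS support.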
Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- .../ethernet/cavium/liquidio/cn23xx_pf_device.c | 54 ++++++++++++++++++++++ .../ethernet/cavium/liquidio/cn23xx_pf_device.h | 12 +++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 26 +++++++++++ .../net/ethernet/cavium/liquidio/octeon_mailbox.c | 52 +++++++++++++++++++++ .../net/ethernet/cavium/liquidio/octeon_mailbox.h | 7 +++ 5 files changed, 151 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index e8b290473ee2..bc9861c90ea3 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1497,3 +1497,57 @@ void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, octeon_mbox_write(oct, &mbox_cmd); } } + +static void +cn23xx_get_vf_stats_callback(struct octeon_device *oct, + struct octeon_mbox_cmd *cmd, void *arg) +{ + struct oct_vf_stats_ctx *ctx = arg; + + memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats)); + atomic_set(&ctx->status, 1); +} + +int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx, + struct oct_vf_stats *stats) +{ + u32 timeout = HZ; // 1sec + struct octeon_mbox_cmd mbox_cmd; + struct oct_vf_stats_ctx ctx; + u32 count = 0, ret; + + if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx))) + return -1; + + if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data)) + return -1; + + mbox_cmd.msg.u64 = 0; + mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 1; + mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS; + mbox_cmd.msg.s.len = 1; + mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback; + ctx.stats = stats; + atomic_set(&ctx.status, 0); + mbox_cmd.fn_arg = (void *)&ctx; + memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data)); + octeon_mbox_write(oct, &mbox_cmd); + + do { + schedule_timeout_uninterruptible(1); + } while ((atomic_read(&ctx.status) == 0) && (count++ < timeout)); + + ret = atomic_read(&ctx.status); + if (ret == 0) { + octeon_mbox_cancel(oct, 0); + dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timedout\n", + vfidx); + return -1; + } + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index 2aba5247b6d8..63b3de4f2bfe 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -43,6 +43,15 @@ struct octeon_cn23xx_pf { #define CN23XX_SLI_DEF_BP 0x40 +struct oct_vf_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; + u64 broadcast; + u64 multicast; +}; + int setup_cn23xx_octeon_pf_device(struct octeon_device *oct); int validate_cn23xx_pf_config_info(struct octeon_device *oct, @@ -56,4 +65,7 @@ int cn23xx_fw_loaded(struct octeon_device *oct); void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, u8 *mac); + +int cn23xx_get_vf_stats(struct octeon_device *oct, int ifidx, + struct oct_vf_stats *stats); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 44c2a0c1bbdd..f3891ae11b02 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3326,6 +3326,31 @@ 
static const struct switchdev_ops lio_pf_switchdev_ops = { .switchdev_port_attr_get = lio_pf_switchdev_attr_get, }; +static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, + struct ifla_vf_stats *vf_stats) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct oct_vf_stats stats; + int ret; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + memset(&stats, 0, sizeof(struct oct_vf_stats)); + ret = cn23xx_get_vf_stats(oct, vfidx, &stats); + if (!ret) { + vf_stats->rx_packets = stats.rx_packets; + vf_stats->tx_packets = stats.tx_packets; + vf_stats->rx_bytes = stats.rx_bytes; + vf_stats->tx_bytes = stats.tx_bytes; + vf_stats->broadcast = stats.broadcast; + vf_stats->multicast = stats.multicast; + } + + return ret; +} + static const struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, @@ -3348,6 +3373,7 @@ static const struct net_device_ops lionetdevops = { .ndo_get_vf_config = liquidio_get_vf_config, .ndo_set_vf_trust = liquidio_set_vf_trust, .ndo_set_vf_link_state = liquidio_set_vf_link_state, + .ndo_get_vf_stats = liquidio_get_vf_stats, }; /** \brief Entry point for the liquidio module diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c index 28e74ee23ff8..021d99cd1665 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -24,6 +24,7 @@ #include "octeon_device.h" #include "octeon_main.h" #include "octeon_mailbox.h" +#include "cn23xx_pf_device.h" /** * octeon_mbox_read: @@ -205,6 +206,26 @@ int octeon_mbox_write(struct octeon_device *oct, return ret; } +static void get_vf_stats(struct octeon_device *oct, + struct oct_vf_stats *stats) +{ + int i; + + for (i = 0; i < oct->num_iqs; i++) { + if (!oct->instr_queue[i]) + continue; + stats->tx_packets += oct->instr_queue[i]->stats.tx_done; + stats->tx_bytes += oct->instr_queue[i]->stats.tx_tot_bytes; + } + + for (i = 0; i < oct->num_oqs; i++) { + if (!oct->droq[i]) + continue; + stats->rx_packets += oct->droq[i]->stats.rx_pkts_received; + stats->rx_bytes += oct->droq[i]->stats.rx_bytes_received; + } +} + /** * octeon_mbox_process_cmd: * @mbox: Pointer mailbox @@ -250,6 +271,15 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox, mbox_cmd->msg.s.params); break; + case OCTEON_GET_VF_STATS: + dev_dbg(&oct->pci_dev->dev, "Got VF stats request. 
Sending data back\n"); + mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE; + mbox_cmd->msg.s.resp_needed = 1; + mbox_cmd->msg.s.len = 1 + + sizeof(struct oct_vf_stats) / sizeof(u64); + get_vf_stats(oct, (struct oct_vf_stats *)mbox_cmd->data); + octeon_mbox_write(oct, mbox_cmd); + break; default: break; } @@ -322,3 +352,25 @@ int octeon_mbox_process_message(struct octeon_mbox *mbox) return 0; } + +int octeon_mbox_cancel(struct octeon_device *oct, int q_no) +{ + struct octeon_mbox *mbox = oct->mbox[q_no]; + struct octeon_mbox_cmd *mbox_cmd; + unsigned long flags = 0; + + spin_lock_irqsave(&mbox->lock, flags); + mbox_cmd = &mbox->mbox_resp; + + if (!(mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING)) { + spin_unlock_irqrestore(&mbox->lock, flags); + return 1; + } + + mbox->state = OCTEON_MBOX_STATE_IDLE; + memset(mbox_cmd, 0, sizeof(*mbox_cmd)); + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + spin_unlock_irqrestore(&mbox->lock, flags); + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h index 1def22afeff1..d92bd7e16477 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h @@ -25,6 +25,7 @@ #define OCTEON_VF_ACTIVE 0x1 #define OCTEON_VF_FLR_REQUEST 0x2 #define OCTEON_PF_CHANGED_VF_MACADDR 0x4 +#define OCTEON_GET_VF_STATS 0x8 /*Macro for Read acknowldgement*/ #define OCTEON_PFVFACK 0xffffffffffffffffULL @@ -107,9 +108,15 @@ struct octeon_mbox { }; +struct oct_vf_stats_ctx { + atomic_t status; + struct oct_vf_stats *stats; +}; + int octeon_mbox_read(struct octeon_mbox *mbox); int octeon_mbox_write(struct octeon_device *oct, struct octeon_mbox_cmd *mbox_cmd); int octeon_mbox_process_message(struct octeon_mbox *mbox); +int octeon_mbox_cancel(struct octeon_device *oct, int q_no); #endif -- cgit v1.2.3 From f1d22a1e0595639bcfef9941ebc0e3cfe9711009 Mon Sep 17 00:00:00 2001 From: George Wilkie Date: Thu, 19 Apr 2018 11:34:14 +0100 Subject: team: account for oper state Account for operational state when determining port linkup state, as per Documentation/networking/operstates.txt. Signed-off-by: George Wilkie Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/team/team.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a6c6ce19eeee..8a8611095ca0 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2918,7 +2918,7 @@ static int team_device_event(struct notifier_block *unused, case NETDEV_CHANGE: if (netif_running(port->dev)) team_port_change_check(port, - !!netif_carrier_ok(port->dev)); + !!netif_oper_up(port->dev)); break; case NETDEV_UNREGISTER: team_del_slave(port->team->dev, dev); -- cgit v1.2.3 From 4c52a889ab8ea7e965dc0ba40968768c5e91972f Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Thu, 19 Apr 2018 15:42:29 +0300 Subject: geneve: remove white-space before '#if IS_ENABLED(CONFIG_IPV6)' Signed-off-by: Alexey Kodanev Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index b919e89a9b93..45acdc9f986e 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1261,7 +1261,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], } if (data[IFLA_GENEVE_REMOTE6]) { - #if IS_ENABLED(CONFIG_IPV6) +#if IS_ENABLED(CONFIG_IPV6) if (changelink && (ip_tunnel_info_af(info) == AF_INET)) { attrtype = IFLA_GENEVE_REMOTE6; goto change_notsup; -- cgit v1.2.3 From 5edbea6987b305346a6ba9cf854e148b4ae814f0 Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Thu, 19 Apr 2018 15:42:30 +0300 Subject: geneve: cleanup hard coded value for Ethernet header length Use ETH_HLEN instead and introduce two new macros: GENEVE_IPV4_HLEN and GENEVE_IPV6_HLEN that include Ethernet header length, corresponded IP header length and GENEVE_BASE_HLEN. Signed-off-by: Alexey Kodanev Signed-off-by: David S. Miller --- drivers/net/geneve.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 45acdc9f986e..b650f845af25 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -36,6 +36,8 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); #define GENEVE_VER 0 #define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr)) +#define GENEVE_IPV4_HLEN (ETH_HLEN + sizeof(struct iphdr) + GENEVE_BASE_HLEN) +#define GENEVE_IPV6_HLEN (ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN) /* per-network namespace private data for this module */ struct geneve_net { @@ -826,8 +828,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return PTR_ERR(rt); if (skb_dst(skb)) { - int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) - - GENEVE_BASE_HLEN - info->options_len - 14; + int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN - + info->options_len; skb_dst_update_pmtu(skb, mtu); } @@ -872,8 +874,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return PTR_ERR(dst); if (skb_dst(skb)) { - int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) - - GENEVE_BASE_HLEN - info->options_len - 14; + int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len; skb_dst_update_pmtu(skb, mtu); } -- cgit v1.2.3 From 321acc1c68c13ce113531a8bc3dc5d43814c4ae2 Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Thu, 19 Apr 2018 15:42:31 +0300 Subject: geneve: check MTU for a minimum in geneve_change_mtu() geneve_change_mtu() will be used not only as ndo_change_mtu() callback, but also to verify a user specified MTU on a new link creation in the next patch. Signed-off-by: Alexey Kodanev Signed-off-by: David S. Miller --- drivers/net/geneve.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index b650f845af25..ae649f699a92 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -942,11 +942,10 @@ tx_error: static int geneve_change_mtu(struct net_device *dev, int new_mtu) { - /* Only possible if called internally, ndo_change_mtu path's new_mtu - * is guaranteed to be between dev->min_mtu and dev->max_mtu. 
- */ if (new_mtu > dev->max_mtu) new_mtu = dev->max_mtu; + else if (new_mtu < dev->min_mtu) + new_mtu = dev->min_mtu; dev->mtu = new_mtu; return 0; -- cgit v1.2.3 From c40e89fd358e94a55d6c1475afbea17b5580f601 Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Thu, 19 Apr 2018 15:42:32 +0300 Subject: geneve: configure MTU based on a lower device Currently, on a new link creation or when 'remote' address parameter is updated, an MTU is not changed and always equals 1500. When a lower device has a larger MTU, it might not be efficient, e.g. for UDP, and requires the manual MTU adjustments to match the MTU of the lower device. This patch tries to automate this process, finds a lower device using the 'remote' address parameter, then uses its MTU to tune GENEVE's MTU: * on a new link creation * when 'remote' parameter is changed Also with this patch, the MTU from a user, on a new link creation, is passed to geneve_change_mtu() where it is verified, and MTU adjustments with a lower device is skipped in that case. Prior that change, it was possible to set the invalid MTU values on a new link creation. Signed-off-by: Alexey Kodanev Signed-off-by: David S. Miller --- drivers/net/geneve.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 53 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ae649f699a92..750eaa53bf0c 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1387,6 +1387,48 @@ change_notsup: return -EOPNOTSUPP; } +static void geneve_link_config(struct net_device *dev, + struct ip_tunnel_info *info, struct nlattr *tb[]) +{ + struct geneve_dev *geneve = netdev_priv(dev); + int ldev_mtu = 0; + + if (tb[IFLA_MTU]) { + geneve_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); + return; + } + + switch (ip_tunnel_info_af(info)) { + case AF_INET: { + struct flowi4 fl4 = { .daddr = info->key.u.ipv4.dst }; + struct rtable *rt = ip_route_output_key(geneve->net, &fl4); + + if (!IS_ERR(rt) && rt->dst.dev) { + ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV4_HLEN; + ip_rt_put(rt); + } + break; + } +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: { + struct rt6_info *rt = rt6_lookup(geneve->net, + &info->key.u.ipv6.dst, NULL, 0, + NULL, 0); + + if (rt && rt->dst.dev) + ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; + ip6_rt_put(rt); + break; + } +#endif + } + + if (ldev_mtu <= 0) + return; + + geneve_change_mtu(dev, ldev_mtu - info->options_len); +} + static int geneve_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -1402,8 +1444,14 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (err) return err; - return geneve_configure(net, dev, extack, &info, metadata, - use_udp6_rx_checksums); + err = geneve_configure(net, dev, extack, &info, metadata, + use_udp6_rx_checksums); + if (err) + return err; + + geneve_link_config(dev, &info, tb); + + return 0; } /* Quiesces the geneve device data path for both TX and RX. 
@@ -1477,8 +1525,10 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], if (err) return err; - if (!geneve_dst_addr_equal(&geneve->info, &info)) + if (!geneve_dst_addr_equal(&geneve->info, &info)) { dst_cache_reset(&info.dst_cache); + geneve_link_config(dev, &info, tb); + } geneve_quiesce(geneve, &gs4, &gs6); geneve->info = info; -- cgit v1.2.3 From 7425d8220f8d0c2127aec75677f652a26f86bc95 Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Thu, 19 Apr 2018 05:50:11 -0700 Subject: qed* : use trust mode to allow VF to override forced MAC As per existing behavior, when PF sets a MAC address for a VF (also called as forced MAC), VF is not allowed to change its MAC address afterwards. This puts the limitation on few use cases such as bonding of VFs, where bonding driver asks VF to change its MAC address. This patch uses a VF trust mode to allow VF to change its MAC address in spite PF has set a forced MAC for that VF. Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 210 +++++++++++++++++++++++-- drivers/net/ethernet/qlogic/qede/qede_filter.c | 3 +- 2 files changed, 195 insertions(+), 18 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 5acb91b3564c..77376fda8eae 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -48,7 +48,7 @@ static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data, u8 fw_return_code); - +static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) { @@ -1790,7 +1790,8 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, if (!p_vf->vport_instance) return -EINVAL; - if (events & BIT(MAC_ADDR_FORCED)) { + if ((events & BIT(MAC_ADDR_FORCED)) || + p_vf->p_vf_info.is_trusted_configured) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it. */ @@ -1809,8 +1810,12 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, "PF failed to configure MAC for VF\n"); return rc; } - - p_vf->configured_features |= 1 << MAC_ADDR_FORCED; + if (p_vf->p_vf_info.is_trusted_configured) + p_vf->configured_features |= + BIT(VFPF_BULLETIN_MAC_ADDR); + else + p_vf->configured_features |= + BIT(MAC_ADDR_FORCED); } if (events & BIT(VLAN_ADDR_FORCED)) { @@ -3170,6 +3175,10 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) return 0; + /* Don't keep track of shadow copy since we don't intend to restore. 
*/ + if (p_vf->p_vf_info.is_trusted_configured) + return 0; + /* First remove entries and then add new ones */ if (p_params->opcode == QED_FILTER_REMOVE) { for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { @@ -3244,9 +3253,17 @@ static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, /* No real decision to make; Store the configured MAC */ if (params->type == QED_FILTER_MAC || - params->type == QED_FILTER_MAC_VLAN) + params->type == QED_FILTER_MAC_VLAN) { ether_addr_copy(vf->mac, params->mac); + if (vf->is_trusted_configured) { + qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid); + + /* Update and post bulleitin again */ + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } + } + return 0; } @@ -4081,16 +4098,60 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, return; } - feature = 1 << MAC_ADDR_FORCED; + if (vf_info->p_vf_info.is_trusted_configured) { + feature = BIT(VFPF_BULLETIN_MAC_ADDR); + /* Trust mode will disable Forced MAC */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~BIT(MAC_ADDR_FORCED); + } else { + feature = BIT(MAC_ADDR_FORCED); + /* Forced MAC will disable MAC_ADDR */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~BIT(VFPF_BULLETIN_MAC_ADDR); + } + memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); vf_info->bulletin.p_virt->valid_bitmap |= feature; - /* Forced MAC will disable MAC_ADDR */ - vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR); qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); } +static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid) +{ + struct qed_vf_info *vf_info; + u64 feature; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n", + vfid); + return -EINVAL; + } + + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n", + vfid); + return -EINVAL; + } + + if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Can not set MAC, Forced MAC is configured\n"); + return -EINVAL; + } + + feature = BIT(VFPF_BULLETIN_MAC_ADDR); + ether_addr_copy(vf_info->bulletin.p_virt->mac, mac); + + vf_info->bulletin.p_virt->valid_bitmap |= feature; + + if (vf_info->p_vf_info.is_trusted_configured) + qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); + + return 0; +} + static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, u16 pvid, int vfid) { @@ -4204,6 +4265,21 @@ out: return rc; } +static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & + BIT(VFPF_BULLETIN_MAC_ADDR))) + return NULL; + + return p_vf->bulletin.p_virt->mac; +} + static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { @@ -4493,8 +4569,12 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) if (!vf_info) continue; - /* Set the forced MAC, and schedule the IOV task */ - ether_addr_copy(vf_info->forced_mac, mac); + /* Set the MAC, and schedule the IOV task */ + if (vf_info->is_trusted_configured) + ether_addr_copy(vf_info->mac, mac); + else + ether_addr_copy(vf_info->forced_mac, mac); + qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); } @@ -4802,6 +4882,33 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static bool 
qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn, + u8 *mac, + struct qed_public_vf_info *info) +{ + if (info->is_trusted_configured) { + if (is_valid_ether_addr(info->mac) && + (!mac || !ether_addr_equal(mac, info->mac))) + return true; + } else { + if (is_valid_ether_addr(info->forced_mac) && + (!mac || !ether_addr_equal(mac, info->forced_mac))) + return true; + } + + return false; +} + +static void qed_set_bulletin_mac(struct qed_hwfn *hwfn, + struct qed_public_vf_info *info, + int vfid) +{ + if (info->is_trusted_configured) + qed_iov_bulletin_set_mac(hwfn, info->mac, vfid); + else + qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid); +} + static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) { int i; @@ -4816,18 +4923,20 @@ static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) continue; /* Update data on bulletin board */ - mac = qed_iov_bulletin_get_forced_mac(hwfn, i); - if (is_valid_ether_addr(info->forced_mac) && - (!mac || !ether_addr_equal(mac, info->forced_mac))) { + if (info->is_trusted_configured) + mac = qed_iov_bulletin_get_mac(hwfn, i); + else + mac = qed_iov_bulletin_get_forced_mac(hwfn, i); + + if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) { DP_VERBOSE(hwfn, QED_MSG_IOV, "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); - /* Update bulletin board with forced MAC */ - qed_iov_bulletin_set_forced_mac(hwfn, - info->forced_mac, i); + /* Update bulletin board with MAC */ + qed_set_bulletin_mac(hwfn, info, i); update = true; } @@ -4867,6 +4976,72 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) +{ + struct qed_public_vf_info *vf_info; + struct qed_vf_info *vf; + u8 *force_mac; + int i; + + vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); + vf = qed_iov_get_vf_info(hwfn, vf_id, true); + + if (!vf_info || !vf) + return; + + /* Force MAC converted to generic MAC in case of VF trust on */ + if (vf_info->is_trusted_configured && + (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) { + force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id); + + if (force_mac) { + /* Clear existing shadow copy of MAC to have a clean + * slate. + */ + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(vf->shadow_config.macs[i], + vf_info->mac)) { + memset(vf->shadow_config.macs[i], 0, + ETH_ALEN); + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", + vf_info->mac, vf_id); + break; + } + } + + ether_addr_copy(vf_info->mac, force_mac); + memset(vf_info->forced_mac, 0, ETH_ALEN); + vf->bulletin.p_virt->valid_bitmap &= + ~BIT(MAC_ADDR_FORCED); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } + } + + /* Update shadow copy with VF MAC when trust mode is turned off */ + if (!vf_info->is_trusted_configured) { + u8 empty_mac[ETH_ALEN]; + + memset(empty_mac, 0, ETH_ALEN); + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(vf->shadow_config.macs[i], + empty_mac)) { + ether_addr_copy(vf->shadow_config.macs[i], + vf_info->mac); + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n", + vf_info->mac, vf_id); + break; + } + } + /* Clear bulletin when trust mode is turned off, + * to have a clean slate for next (normal) operations. 
+ */ + qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } +} + static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) { struct qed_sp_vport_update_params params; @@ -4890,6 +5065,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) continue; vf_info->is_trusted_configured = vf_info->is_trusted_request; + /* Handle forced MAC mode */ + qed_update_mac_for_vf_trust_change(hwfn, i); + /* Validate that the VF has a configured vport */ vf = qed_iov_get_vf_info(hwfn, i, true); if (!vf->vport_instance) diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 6687e04d1558..8094f035b705 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -550,8 +550,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced) __qede_lock(edev); - /* MAC hints take effect only if we haven't set one already */ - if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) { + if (!is_valid_ether_addr(mac)) { __qede_unlock(edev); return; } -- cgit v1.2.3 From 809c45a091d93e05c6e9b5d53bb3f1185273286b Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Thu, 19 Apr 2018 05:50:12 -0700 Subject: qed* : Add new TLV to request PF to update MAC in bulletin board There may be a need for VF driver to request PF to explicitly update its bulletin with a MAC address. e.g. When user assigns a MAC address to VF while VF is still down, and PF's bulletin board contains different MAC address, in this case, when VF's interface is brought up, it gets loaded with MAC address from bulletin board which is not desirable. To handle this corner case, we need a new TLV to request PF to update its bulletin board with suggested MAC. This request will be honored only for trusted VFs. Signed-off-by: Shahed Shaikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 19 +++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.c | 37 ++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.c | 29 ++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 21 +++++++++++++++ drivers/net/ethernet/qlogic/qede/qede_filter.c | 4 +++ include/linux/qed/qed_eth_if.h | 1 + 6 files changed, 111 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e874504e8b28..8b1b7e8ca56c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2850,6 +2850,24 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, cqe); } +static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac) +{ + int i, ret; + + if (IS_PF(cdev)) + return 0; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac); + if (ret) + return ret; + } + + return 0; +} + #ifdef CONFIG_QED_SRIOV extern const struct qed_iov_hv_ops qed_iov_ops_pass; #endif @@ -2887,6 +2905,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .ntuple_filter_config = &qed_ntuple_arfs_filter_config, .configure_arfs_searcher = &qed_configure_arfs_searcher, .get_coalesce = &qed_get_coalesce, + .req_bulletin_update_mac = &qed_req_bulletin_update_mac, }; const struct qed_eth_ops *qed_get_eth_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 77376fda8eae..f01bf52bc381 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3820,6 +3820,40 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); } +static int +qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct vfpf_bulletin_update_mac_tlv *p_req; + u8 status = PFVF_STATUS_SUCCESS; + int rc = 0; + + if (!p_vf->p_vf_info.is_trusted_configured) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Blocking bulletin update request from untrusted VF[%d]\n", + p_vf->abs_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + rc = -EINVAL; + goto send_status; + } + + p_req = &mbx->req_virt->bulletin_update_mac; + ether_addr_copy(p_bulletin->mac, p_req->mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Updated bulletin of VF[%d] with requested MAC[%pM]\n", + p_vf->abs_vf_id, p_req->mac); + +send_status: + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, + sizeof(struct pfvf_def_resp_tlv), status); + return rc; +} + static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid) { @@ -3899,6 +3933,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_COALESCE_READ: qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_BULLETIN_UPDATE_MAC: + qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); + break; } } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 91b5e9f02a62..2d7fcd6a0777 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1374,6 +1374,35 @@ exit: return rc; } +int 
+qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + u8 *p_mac) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_bulletin_update_mac_tlv *p_req; + struct pfvf_def_resp_tlv *p_resp; + int rc; + + if (!p_mac) + return -EINVAL; + + /* clear mailbox and prep header tlv */ + p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC, + sizeof(*p_req)); + ether_addr_copy(p_req->mac, p_mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Requesting bulletin update for MAC[%pM]\n", p_mac); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid) diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 97d44dfb38ca..4f05d5eb3cf5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -518,6 +518,12 @@ struct pfvf_read_coal_resp_tlv { u8 padding[6]; }; +struct vfpf_bulletin_update_mac_tlv { + struct vfpf_first_tlv first_tlv; + u8 mac[ETH_ALEN]; + u8 padding[2]; +}; + union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; @@ -532,6 +538,7 @@ union vfpf_tlvs { struct vfpf_update_tunn_param_tlv tunn_param_update; struct vfpf_update_coalesce update_coalesce; struct vfpf_read_coal_req_tlv read_coal_req; + struct vfpf_bulletin_update_mac_tlv bulletin_update_mac; struct tlv_buffer_size tlv_buf_size; }; @@ -650,6 +657,7 @@ enum { CHANNEL_TLV_COALESCE_UPDATE, CHANNEL_TLV_QID, CHANNEL_TLV_COALESCE_READ, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. 
@@ -1042,6 +1050,13 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, struct qed_tunnel_info *p_tunn); u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id); +/** + * @brief - Ask PF to update the MAC address in it's bulletin board + * + * @param p_mac - mac address to be updated in bulletin board + */ +int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac); + #else static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params) @@ -1228,6 +1243,12 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, return -EINVAL; } +static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + u8 *p_mac) +{ + return -EINVAL; +} + static inline u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 8094f035b705..43569b1839be 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1160,6 +1160,10 @@ int qede_set_mac_addr(struct net_device *ndev, void *p) if (edev->state != QEDE_STATE_OPEN) { DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "The device is currently down\n"); + /* Ask PF to explicitly update a copy in bulletin board */ + if (IS_VF(edev) && edev->ops->req_bulletin_update_mac) + edev->ops->req_bulletin_update_mac(edev->cdev, + ndev->dev_addr); goto out; } diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 147d08ccf813..7f9756fe9180 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -352,6 +352,7 @@ struct qed_eth_ops { int (*configure_arfs_searcher)(struct qed_dev *cdev, enum qed_filter_config_mode mode); int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); + int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac); }; const struct qed_eth_ops *qed_get_eth_ops(void); -- cgit v1.2.3 From a83762d97980f13b1ec7f2514b28666c0dac3af6 Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Thu, 19 Apr 2018 17:37:25 +0100 Subject: sfc: set and clear interrupt affinity hints Use cpumask_local_spread to provide interrupt affinity hints for each queue. This will spread interrupts across NUMA local CPUs first, extending to remote nodes if needed. Signed-off-by: Bert Kenward Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 692dd729ee2a..c30b9e26f131 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1551,6 +1551,38 @@ static int efx_probe_interrupts(struct efx_nic *efx) return 0; } +#if defined(CONFIG_SMP) +static void efx_set_interrupt_affinity(struct efx_nic *efx) +{ + struct efx_channel *channel; + unsigned int cpu; + + efx_for_each_channel(channel, efx) { + cpu = cpumask_local_spread(channel->channel, + pcibus_to_node(efx->pci_dev->bus)); + irq_set_affinity_hint(channel->irq, cpumask_of(cpu)); + } +} + +static void efx_clear_interrupt_affinity(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + irq_set_affinity_hint(channel->irq, NULL); +} +#else +static void +efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) +{ +} + +static void +efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) +{ +} +#endif /* CONFIG_SMP */ + static int efx_soft_enable_interrupts(struct efx_nic *efx) { struct efx_channel *channel, *end_channel; @@ -3165,6 +3197,7 @@ static void efx_pci_remove_main(struct efx_nic *efx) cancel_work_sync(&efx->reset_work); efx_disable_interrupts(efx); + efx_clear_interrupt_affinity(efx); efx_nic_fini_interrupt(efx); efx_fini_port(efx); efx->type->fini(efx); @@ -3314,6 +3347,8 @@ static int efx_pci_probe_main(struct efx_nic *efx) rc = efx_nic_init_interrupt(efx); if (rc) goto fail5; + + efx_set_interrupt_affinity(efx); rc = efx_enable_interrupts(efx); if (rc) goto fail6; @@ -3321,6 +3356,7 @@ static int efx_pci_probe_main(struct efx_nic *efx) return 0; fail6: + efx_clear_interrupt_affinity(efx); efx_nic_fini_interrupt(efx); fail5: efx_fini_port(efx); -- cgit v1.2.3 From 760db29bdc97b73ff60b091315ad787b1deb5cf5 Mon Sep 17 00:00:00 2001 From: Phil Elwell Date: Thu, 19 Apr 2018 17:59:38 +0100 Subject: lan78xx: Read MAC address from DT if present There is a standard mechanism for locating and using a MAC address from the Device Tree. Use this facility in the lan78xx driver to support applications without programmed EEPROM or OTP. At the same time, regularise the handling of the different address sources. Signed-off-by: Phil Elwell Signed-off-by: David S. 
Miller --- drivers/net/usb/lan78xx.c | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 0867f7275852..a823f010de30 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "lan78xx.h" #define DRIVER_AUTHOR "WOOJUNG HUH " @@ -1652,34 +1653,31 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) addr[5] = (addr_hi >> 8) & 0xFF; if (!is_valid_ether_addr(addr)) { - /* reading mac address from EEPROM or OTP */ - if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, - addr) == 0) || - (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN, - addr) == 0)) { - if (is_valid_ether_addr(addr)) { - /* eeprom values are valid so use them */ - netif_dbg(dev, ifup, dev->net, - "MAC address read from EEPROM"); - } else { - /* generate random MAC */ - random_ether_addr(addr); - netif_dbg(dev, ifup, dev->net, - "MAC address set to random addr"); - } - - addr_lo = addr[0] | (addr[1] << 8) | - (addr[2] << 16) | (addr[3] << 24); - addr_hi = addr[4] | (addr[5] << 8); - - ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); - ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) { + /* valid address present in Device Tree */ + netif_dbg(dev, ifup, dev->net, + "MAC address read from Device Tree"); + } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, + ETH_ALEN, addr) == 0) || + (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, + ETH_ALEN, addr) == 0)) && + is_valid_ether_addr(addr)) { + /* eeprom values are valid so use them */ + netif_dbg(dev, ifup, dev->net, + "MAC address read from EEPROM"); } else { /* generate random MAC */ random_ether_addr(addr); netif_dbg(dev, ifup, dev->net, "MAC address set to random addr"); } + + addr_lo = addr[0] | (addr[1] << 8) | + (addr[2] << 16) | (addr[3] << 24); + addr_hi = addr[4] | (addr[5] << 8); + + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); } ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); -- cgit v1.2.3 From 1827b0678863bc97a1653fdf5308762b2aefcd56 Mon Sep 17 00:00:00 2001 From: Phil Elwell Date: Thu, 19 Apr 2018 17:59:39 +0100 Subject: lan78xx: Read LED states from Device Tree Add support for DT property "microchip,led-modes", a vector of zero to four cells (u32s) in the range 0-15, each of which sets the mode for one of the LEDs. Some possible values are: 0=link/activity 1=link1000/activity 2=link100/activity 3=link10/activity 4=link100/1000/activity 5=link10/1000/activity 6=link10/100/activity 14=off 15=on These values are given symbolic constants in a dt-bindings header. Also use the presence of the DT property to indicate that the LEDs should be enabled - necessary in the event that no valid OTP or EEPROM is available. Signed-off-by: Phil Elwell Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + drivers/net/phy/microchip.c | 25 ++++++++++++++++++++++ drivers/net/usb/lan78xx.c | 32 ++++++++++++++++++++++++++++- include/dt-bindings/net/microchip-lan78xx.h | 21 +++++++++++++++++++ include/linux/microchipphy.h | 3 +++ 5 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 include/dt-bindings/net/microchip-lan78xx.h (limited to 'drivers/net') diff --git a/MAINTAINERS b/MAINTAINERS index a7321687cae9..c952d3076a65 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14572,6 +14572,7 @@ M: Microchip Linux Driver Support L: netdev@vger.kernel.org S: Maintained F: drivers/net/usb/lan78xx.* +F: include/dt-bindings/net/microchip-lan78xx.h USB MASS STORAGE DRIVER M: Alan Stern diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 0f293ef28935..ef5e160fe9dc 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #define DRIVER_AUTHOR "WOOJUNG HUH " #define DRIVER_DESC "Microchip LAN88XX PHY driver" @@ -70,6 +72,8 @@ static int lan88xx_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; struct lan88xx_priv *priv; + u32 led_modes[4]; + int len; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -77,6 +81,27 @@ static int lan88xx_probe(struct phy_device *phydev) priv->wolopts = 0; + len = of_property_read_variable_u32_array(dev->of_node, + "microchip,led-modes", + led_modes, + 0, + ARRAY_SIZE(led_modes)); + if (len >= 0) { + u32 reg = 0; + int i; + + for (i = 0; i < len; i++) { + if (led_modes[i] > 15) + return -EINVAL; + reg |= led_modes[i] << (i * 4); + } + for (; i < ARRAY_SIZE(led_modes); i++) + reg |= LAN78XX_FORCE_LED_OFF << (i * 4); + (void)phy_write(phydev, LAN78XX_PHY_LED_MODE_SELECT, reg); + } else if (len == -EOVERFLOW) { + return -EINVAL; + } + /* these values can be used to identify internal PHY */ priv->chip_id = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_ID); priv->chip_rev = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_REV); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index a823f010de30..6b03b973083e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include "lan78xx.h" @@ -1760,6 +1761,7 @@ done: static int lan78xx_mdio_init(struct lan78xx_net *dev) { + struct device_node *node; int ret; dev->mdiobus = mdiobus_alloc(); @@ -1788,7 +1790,13 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) break; } - ret = mdiobus_register(dev->mdiobus); + node = of_get_child_by_name(dev->udev->dev.of_node, "mdio"); + if (node) { + ret = of_mdiobus_register(dev->mdiobus, node); + of_node_put(node); + } else { + ret = mdiobus_register(dev->mdiobus); + } if (ret) { netdev_err(dev->net, "can't register MDIO bus\n"); goto exit1; @@ -2077,6 +2085,28 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + if (phydev->mdio.dev.of_node) { + u32 reg; + int len; + + len = of_property_count_elems_of_size(phydev->mdio.dev.of_node, + "microchip,led-modes", + sizeof(u32)); + if (len >= 0) { + /* Ensure the appropriate LEDs are enabled */ + lan78xx_read_reg(dev, HW_CFG, ®); + reg &= ~(HW_CFG_LED0_EN_ | + HW_CFG_LED1_EN_ | + HW_CFG_LED2_EN_ | + HW_CFG_LED3_EN_); + reg |= (len > 0) * HW_CFG_LED0_EN_ | + (len > 1) * HW_CFG_LED1_EN_ | + (len > 2) * HW_CFG_LED2_EN_ | + (len > 3) * HW_CFG_LED3_EN_; + 
lan78xx_write_reg(dev, HW_CFG, reg); + } + } + genphy_config_aneg(phydev); dev->fc_autoneg = phydev->autoneg; diff --git a/include/dt-bindings/net/microchip-lan78xx.h b/include/dt-bindings/net/microchip-lan78xx.h new file mode 100644 index 000000000000..0742ff075307 --- /dev/null +++ b/include/dt-bindings/net/microchip-lan78xx.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_MICROCHIP_LAN78XX_H +#define _DT_BINDINGS_MICROCHIP_LAN78XX_H + +/* LED modes for LAN7800/LAN7850 embedded PHY */ + +#define LAN78XX_LINK_ACTIVITY 0 +#define LAN78XX_LINK_1000_ACTIVITY 1 +#define LAN78XX_LINK_100_ACTIVITY 2 +#define LAN78XX_LINK_10_ACTIVITY 3 +#define LAN78XX_LINK_100_1000_ACTIVITY 4 +#define LAN78XX_LINK_10_1000_ACTIVITY 5 +#define LAN78XX_LINK_10_100_ACTIVITY 6 +#define LAN78XX_DUPLEX_COLLISION 8 +#define LAN78XX_COLLISION 9 +#define LAN78XX_ACTIVITY 10 +#define LAN78XX_AUTONEG_FAULT 12 +#define LAN78XX_FORCE_LED_OFF 14 +#define LAN78XX_FORCE_LED_ON 15 + +#endif diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h index eb492d47f717..8e4015e7f8d3 100644 --- a/include/linux/microchipphy.h +++ b/include/linux/microchipphy.h @@ -70,4 +70,7 @@ #define LAN88XX_MMD3_CHIP_ID (32877) #define LAN88XX_MMD3_CHIP_REV (32878) +/* Registers specific to the LAN7800/LAN7850 embedded phy */ +#define LAN78XX_PHY_LED_MODE_SELECT (0x1D) + #endif /* _MICROCHIPPHY_H */ -- cgit v1.2.3 From 496218656f9857d801512efdec1d609ebbe8a83b Mon Sep 17 00:00:00 2001 From: Raghuram Chary J Date: Fri, 20 Apr 2018 11:43:50 +0530 Subject: lan78xx: Add support to dump lan78xx registers In order to dump lan78xx family registers using ethtool, add support at lan78xx driver level. Signed-off-by: Raghuram Chary J Signed-off-by: David S. Miller --- drivers/net/usb/lan78xx.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 6b03b973083e..c59f8afd0d73 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -280,6 +280,30 @@ struct lan78xx_statstage64 { u64 eee_tx_lpi_time; }; +static u32 lan78xx_regs[] = { + ID_REV, + INT_STS, + HW_CFG, + PMT_CTL, + E2P_CMD, + E2P_DATA, + USB_STATUS, + VLAN_TYPE, + MAC_CR, + MAC_RX, + MAC_TX, + FLOW, + ERR_STS, + MII_ACC, + MII_DATA, + EEE_TX_LPI_REQ_DLY, + EEE_TW_TX_SYS, + EEE_TX_LPI_REM_DLY, + WUCSR +}; + +#define PHY_REG_SIZE (32 * sizeof(u32)) + struct lan78xx_net; struct lan78xx_priv { @@ -1607,6 +1631,34 @@ exit: return ret; } +static int lan78xx_get_regs_len(struct net_device *netdev) +{ + if (!netdev->phydev) + return (sizeof(lan78xx_regs)); + else + return (sizeof(lan78xx_regs) + PHY_REG_SIZE); +} + +static void +lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *buf) +{ + u32 *data = buf; + int i, j; + struct lan78xx_net *dev = netdev_priv(netdev); + + /* Read Device/MAC registers */ + for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++) + lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]); + + if (!netdev->phydev) + return; + + /* Read PHY registers */ + for (j = 0; j < 32; i++, j++) + data[i] = phy_read(netdev->phydev, j); +} + static const struct ethtool_ops lan78xx_ethtool_ops = { .get_link = lan78xx_get_link, .nway_reset = phy_ethtool_nway_reset, @@ -1627,6 +1679,8 @@ static const struct ethtool_ops lan78xx_ethtool_ops = { .set_pauseparam = lan78xx_set_pause, .get_link_ksettings = lan78xx_get_link_ksettings, .set_link_ksettings = lan78xx_set_link_ksettings, + 
.get_regs_len = lan78xx_get_regs_len, + .get_regs = lan78xx_get_regs, }; static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) -- cgit v1.2.3 From cf1a1e07fc8bb29947ad3c9568d73aee3f851431 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 20 Apr 2018 13:18:16 +0200 Subject: tun: do not compute the rxhash, if not needed Currently, the tun driver, in absence of an eBPF steering program, always compute the rxhash in its rx path, even when such value is later unused due to additional checks ( This changeset moves the all the related checks just before the __skb_get_hash_symmetric(), so that the latter is no more computed when unneeded. Also replace an unneeded RCU section with rcu_access_pointer(). Signed-off-by: Paolo Abeni Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/tun.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 1e58be152d5c..091ace726763 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -525,11 +525,6 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, rcu_read_lock(); - /* We may get a very small possibility of OOO during switching, not - * worth to optimize.*/ - if (tun->numqueues == 1 || tfile->detached) - goto unlock; - e = tun_flow_find(head, rxhash); if (likely(e)) { /* TODO: keep queueing to old queue until it's empty? */ @@ -548,7 +543,6 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, spin_unlock_bh(&tun->lock); } -unlock: rcu_read_unlock(); } @@ -1937,10 +1931,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, rcu_read_unlock(); } - rcu_read_lock(); - if (!rcu_dereference(tun->steering_prog)) + /* Compute the costly rx hash only if needed for flow updates. + * We may get a very small possibility of OOO during switching, not + * worth to optimize. + */ + if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && + !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); - rcu_read_unlock(); if (frags) { /* Exercise flow dissector code path. */ -- cgit v1.2.3 From 48d7a07ba355c7056e66adcdd35379d12e48252b Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 20 Apr 2018 08:48:47 -0700 Subject: hv_netvsc: select needed ucs2_string routine The conversion of rndis friendly name to utf8 uses a standard kernel routine which is optional in config. Therefore build would fail for some configurations. Resolve by selecting needed library. Fixes: 0fe554a46a0f ("hv_netvsc: propogate Hyper-V friendly name into interface alias") Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig index 936968d23559..0765d5f61714 100644 --- a/drivers/net/hyperv/Kconfig +++ b/drivers/net/hyperv/Kconfig @@ -1,5 +1,6 @@ config HYPERV_NET tristate "Microsoft Hyper-V virtual network driver" depends on HYPERV + select UCS2_STRING help Select this option to enable the Hyper-V virtual network driver. -- cgit v1.2.3 From aaa6452763cf944da72f16e0736554accb364703 Mon Sep 17 00:00:00 2001 From: "Nikita V. Shirokov" Date: Sun, 22 Apr 2018 21:16:48 -0700 Subject: bpf: fix virtio-net's length calc for XDP_PASS In commit 6870de435b90 ("bpf: make virtio compatible w/ bpf_xdp_adjust_tail") i didn't account for vi->hdr_len during new packet's length calculation after bpf_prog_run in receive_mergeable. 
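For illustration, a minimal sketch of the corrected length computation — only the relevant lines of receive_mergeable(), matching the one-line fix in the hunk that follows:

	/* recalculate len if xdp.data or xdp.data_end were adjusted by the XDP program */
	len = xdp.data_end - xdp.data + vi->hdr_len;

Without the vi->hdr_len term, the recalculated length no longer covered the virtio-net header.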
because of this all packets, if they were passed to the kernel, were truncated by 12 bytes. Fixes:6870de435b90 ("bpf: make virtio compatible w/ bpf_xdp_adjust_tail") Reported-by: David Ahern Signed-off-by: Nikita V. Shirokov Acked-by: Jason Wang Signed-off-by: Daniel Borkmann --- drivers/net/virtio_net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3b5991734118..f34794a76c4d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -765,7 +765,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, /* recalculate len if xdp.data or xdp.data_end were * adjusted */ - len = xdp.data_end - xdp.data; + len = xdp.data_end - xdp.data + vi->hdr_len; /* We can only create skb based on xdp_page. */ if (unlikely(xdp_page != page)) { rcu_read_unlock(); -- cgit v1.2.3 From 22148df0d0bdbd5d1cca4e069e17bf49b070498f Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 22 Apr 2018 17:15:15 +0200 Subject: r8169: don't use netif_info et al before net_device has been registered There's no benefit in using netif_info et al before the net_device has been registered. We get messages like r8169 0000:03:00.0 (unnamed net_device) (uninitialized): [message] Therefore use dev_info/dev_err instead. As a side effect we don't need parameter dev for function rtl8169_get_mac_version() any longer. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index fcd42d0387b9..a5d00ee94245 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2483,7 +2483,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, - struct net_device *dev, u8 default_version) + u8 default_version) { /* * The driver currently handles the 8168Bf and the 8168Be identically @@ -2588,8 +2588,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, tp->mac_version = p->mac_version; if (tp->mac_version == RTL_GIGA_MAC_NONE) { - netif_notice(tp, probe, dev, - "unknown MAC, using family default\n"); + dev_notice(tp_to_dev(tp), + "unknown MAC, using family default\n"); tp->mac_version = default_version; } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { tp->mac_version = tp->mii.supports_gmii ? @@ -8107,40 +8107,39 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { - netif_err(tp, probe, dev, "enable failure\n"); + dev_err(&pdev->dev, "enable failure\n"); return rc; } if (pcim_set_mwi(pdev) < 0) - netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); /* use first MMIO region */ region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; if (region < 0) { - netif_err(tp, probe, dev, "no MMIO resource found\n"); + dev_err(&pdev->dev, "no MMIO resource found\n"); return -ENODEV; } /* check for weird/broken PCI region reporting */ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { - netif_err(tp, probe, dev, - "Invalid PCI region size(s), aborting\n"); + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); return -ENODEV; } rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME); if (rc < 0) { - netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); return rc; } tp->mmio_addr = pcim_iomap_table(pdev)[region]; if (!pci_is_pcie(pdev)) - netif_info(tp, probe, dev, "not PCI Express\n"); + dev_info(&pdev->dev, "not PCI Express\n"); /* Identify chip attached to board */ - rtl8169_get_mac_version(tp, dev, cfg->default_ver); + rtl8169_get_mac_version(tp, cfg->default_ver); tp->cp_cmd = 0; @@ -8157,7 +8156,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc < 0) { - netif_err(tp, probe, dev, "DMA configuration failed\n"); + dev_err(&pdev->dev, "DMA configuration failed\n"); return rc; } } @@ -8185,7 +8184,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = rtl_alloc_irq(tp); if (rc < 0) { - netif_err(tp, probe, dev, "Can't allocate interrupt\n"); + dev_err(&pdev->dev, "Can't allocate interrupt\n"); return rc; } -- cgit v1.2.3 From 5f0456b43140af9413397cc11d03d18b9f2fc2fc Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Mon, 23 Apr 2018 09:05:15 +0100 Subject: net: stmmac: Implement logic to automatically select HW Interface Move all the core version detection to a common place ("hwif.c") and implement a table which can be used to lookup the correct callbacks for each IP version. This simplifies the initialization flow of each IP version and eases future implementation of new IP versions. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/Makefile | 3 +- drivers/net/ethernet/stmicro/stmmac/common.h | 30 +-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 1 - .../net/ethernet/stmicro/stmmac/dwmac1000_core.c | 29 +-- .../net/ethernet/stmicro/stmmac/dwmac100_core.c | 23 +-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1 - drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 41 ++-- drivers/net/ethernet/stmicro/stmmac/hwif.c | 220 +++++++++++++++++++++ drivers/net/ethernet/stmicro/stmmac/hwif.h | 17 ++ drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 77 +------- 11 files changed, 279 insertions(+), 164 deletions(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/hwif.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 972e4ef6d414..e3b578b4f7cb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -4,7 +4,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ - dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o $(stmmac-y) + dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ + $(stmmac-y) # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 59673c6d2e52..627e905b6d76 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -39,6 +39,7 @@ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 #define DWMAC_CORE_4_00 0x40 +#define DWMAC_CORE_4_10 0x41 #define DWMAC_CORE_5_00 0x50 #define DWMAC_CORE_5_10 0x51 #define STMMAC_CHAN0 0 /* Always supported and default for all chips */ @@ -428,12 +429,9 @@ struct stmmac_rx_routing { u32 reg_shift; }; -struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries, - int *synopsys_id); -struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id); -struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries, int *synopsys_id); +int dwmac100_setup(struct stmmac_priv *priv); +int dwmac1000_setup(struct stmmac_priv *priv); +int dwmac4_setup(struct stmmac_priv *priv); void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned int high, unsigned int low); @@ -453,24 +451,4 @@ extern const struct stmmac_mode_ops ring_mode_ops; extern const struct stmmac_mode_ops chain_mode_ops; extern const struct stmmac_desc_ops dwmac4_desc_ops; -/** - * stmmac_get_synopsys_id - return the SYINID. - * @priv: driver private structure - * Description: this simple function is to decode and return the SYINID - * starting from the HW core register. 
- */ -static inline u32 stmmac_get_synopsys_id(u32 hwid) -{ - /* Check Synopsys Id (not available on old chips) */ - if (likely(hwid)) { - u32 uid = ((hwid & 0x0000ff00) >> 8); - u32 synid = (hwid & 0x000000ff); - - pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n", - uid, synid); - - return synid; - } - return 0; -} #endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index c02d36629c52..184ca13c8f79 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -29,7 +29,6 @@ #define GMAC_MII_DATA 0x00000014 /* MII Data */ #define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ #define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ -#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC_DEBUG 0x00000024 /* GMAC debug register */ #define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index ef10baf14186..0877bde6e860 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -27,6 +27,7 @@ #include #include #include +#include "stmmac.h" #include "stmmac_pcs.h" #include "dwmac1000.h" @@ -498,7 +499,7 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, x->mac_gmii_rx_proto_engine++; } -static const struct stmmac_ops dwmac1000_ops = { +const struct stmmac_ops dwmac1000_ops = { .core_init = dwmac1000_core_init, .set_mac = stmmac_set_mac, .rx_ipc = dwmac1000_rx_ipc_enable, @@ -519,28 +520,21 @@ static const struct stmmac_ops dwmac1000_ops = { .pcs_get_adv_lp = dwmac1000_get_adv_lp, }; -struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries, - int *synopsys_id) +int dwmac1000_setup(struct stmmac_priv *priv) { - struct mac_device_info *mac; - u32 hwid = readl(ioaddr + GMAC_VERSION); + struct mac_device_info *mac = priv->hw; - mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); - if (!mac) - return NULL; + dev_info(priv->device, "\tDWMAC1000\n"); - mac->pcsr = ioaddr; - mac->multicast_filter_bins = mcbins; - mac->unicast_filter_entries = perfect_uc_entries; + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; mac->mcast_bits_log2 = 0; if (mac->multicast_filter_bins) mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); - mac->mac = &dwmac1000_ops; - mac->dma = &dwmac1000_dma_ops; - mac->link.duplex = GMAC_CONTROL_DM; mac->link.speed10 = GMAC_CONTROL_PS; mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES; @@ -555,8 +549,5 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, mac->mii.clk_csr_shift = 2; mac->mii.clk_csr_mask = GENMASK(5, 2); - /* Get and dump the chip ID */ - *synopsys_id = stmmac_get_synopsys_id(hwid); - - return mac; + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 91b23f9db31a..b735143987e1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -27,6 +27,7 @@ #include #include #include +#include "stmmac.h" #include "dwmac100.h" static void dwmac100_core_init(struct mac_device_info *hw, @@ -159,7 +160,7 @@ static 
void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode) return; } -static const struct stmmac_ops dwmac100_ops = { +const struct stmmac_ops dwmac100_ops = { .core_init = dwmac100_core_init, .set_mac = stmmac_set_mac, .rx_ipc = dwmac100_rx_ipc_enable, @@ -172,20 +173,13 @@ static const struct stmmac_ops dwmac100_ops = { .get_umac_addr = dwmac100_get_umac_addr, }; -struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id) +int dwmac100_setup(struct stmmac_priv *priv) { - struct mac_device_info *mac; + struct mac_device_info *mac = priv->hw; - mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); - if (!mac) - return NULL; - - pr_info("\tDWMAC100\n"); - - mac->pcsr = ioaddr; - mac->mac = &dwmac100_ops; - mac->dma = &dwmac100_dma_ops; + dev_info(priv->device, "\tDWMAC100\n"); + mac->pcsr = priv->ioaddr; mac->link.duplex = MAC_CONTROL_F; mac->link.speed10 = 0; mac->link.speed100 = 0; @@ -200,8 +194,5 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id) mac->mii.clk_csr_shift = 2; mac->mii.clk_csr_mask = GENMASK(5, 2); - /* Synopsys Id is not available on old chips */ - *synopsys_id = 0; - - return mac; + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index dedd40613090..03eab9077c1c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -34,7 +34,6 @@ #define GMAC_PCS_BASE 0x000000e0 #define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 #define GMAC_PMT 0x000000c0 -#define GMAC_VERSION 0x00000110 #define GMAC_DEBUG 0x00000114 #define GMAC_HW_FEATURE0 0x0000011c #define GMAC_HW_FEATURE1 0x00000120 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 517b1f6736a8..7289b3b47d8e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -18,6 +18,7 @@ #include #include #include +#include "stmmac.h" #include "stmmac_pcs.h" #include "dwmac4.h" #include "dwmac5.h" @@ -700,7 +701,7 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, x->mac_gmii_rx_proto_engine++; } -static const struct stmmac_ops dwmac4_ops = { +const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -731,7 +732,7 @@ static const struct stmmac_ops dwmac4_ops = { .set_filter = dwmac4_set_filter, }; -static const struct stmmac_ops dwmac410_ops = { +const struct stmmac_ops dwmac410_ops = { .core_init = dwmac4_core_init, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -762,7 +763,7 @@ static const struct stmmac_ops dwmac410_ops = { .set_filter = dwmac4_set_filter, }; -static const struct stmmac_ops dwmac510_ops = { +const struct stmmac_ops dwmac510_ops = { .core_init = dwmac4_core_init, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -796,19 +797,16 @@ static const struct stmmac_ops dwmac510_ops = { .safety_feat_dump = dwmac5_safety_feat_dump, }; -struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries, int *synopsys_id) +int dwmac4_setup(struct stmmac_priv *priv) { - struct mac_device_info *mac; - u32 hwid = readl(ioaddr + GMAC_VERSION); + struct mac_device_info *mac = priv->hw; - mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); - if (!mac) - return NULL; + dev_info(priv->device, "\tDWMAC4/5\n"); - mac->pcsr = ioaddr; - 
mac->multicast_filter_bins = mcbins; - mac->unicast_filter_entries = perfect_uc_entries; + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; mac->mcast_bits_log2 = 0; if (mac->multicast_filter_bins) @@ -828,20 +826,5 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, mac->mii.clk_csr_shift = 8; mac->mii.clk_csr_mask = GENMASK(11, 8); - /* Get and dump the chip ID */ - *synopsys_id = stmmac_get_synopsys_id(hwid); - - if (*synopsys_id > DWMAC_CORE_4_00) - mac->dma = &dwmac410_dma_ops; - else - mac->dma = &dwmac4_dma_ops; - - if (*synopsys_id >= DWMAC_CORE_5_10) - mac->mac = &dwmac510_ops; - else if (*synopsys_id >= DWMAC_CORE_4_00) - mac->mac = &dwmac410_ops; - else - mac->mac = &dwmac4_ops; - - return mac; + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c new file mode 100644 index 000000000000..2b0a7e79de00 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac HW Interface Handling + */ + +#include "common.h" +#include "stmmac.h" + +static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg) +{ + u32 reg = readl(priv->ioaddr + id_reg); + + if (!reg) { + dev_info(priv->device, "Version ID not available\n"); + return 0x0; + } + + dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n", + (unsigned int)(reg & GENMASK(15, 8)) >> 8, + (unsigned int)(reg & GENMASK(7, 0))); + return reg & GENMASK(7, 0); +} + +static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + if (priv->chain_mode) { + dev_info(priv->device, "Chain mode enabled\n"); + priv->mode = STMMAC_CHAIN_MODE; + mac->mode = &chain_mode_ops; + } else { + dev_info(priv->device, "Ring mode enabled\n"); + priv->mode = STMMAC_RING_MODE; + mac->mode = &ring_mode_ops; + } +} + +static int stmmac_dwmac1_quirks(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + if (priv->plat->enh_desc) { + dev_info(priv->device, "Enhanced/Alternate descriptors\n"); + + /* GMAC older than 3.50 has no extended descriptors */ + if (priv->synopsys_id >= DWMAC_CORE_3_50) { + dev_info(priv->device, "Enabled extended descriptors\n"); + priv->extend_desc = 1; + } else { + dev_warn(priv->device, "Extended descriptors not supported\n"); + } + + mac->desc = &enh_desc_ops; + } else { + dev_info(priv->device, "Normal descriptors\n"); + mac->desc = &ndesc_ops; + } + + stmmac_dwmac_mode_quirk(priv); + return 0; +} + +static int stmmac_dwmac4_quirks(struct stmmac_priv *priv) +{ + stmmac_dwmac_mode_quirk(priv); + return 0; +} + +static const struct stmmac_hwif_entry { + bool gmac; + bool gmac4; + u32 min_id; + const void *desc; + const void *dma; + const void *mac; + const void *hwtimestamp; + const void *mode; + int (*setup)(struct stmmac_priv *priv); + int (*quirks)(struct stmmac_priv *priv); +} stmmac_hw[] = { + /* NOTE: New HW versions shall go to the end of this table */ + { + .gmac = false, + .gmac4 = false, + .min_id = 0, + .desc = NULL, + .dma = &dwmac100_dma_ops, + .mac = &dwmac100_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .setup = dwmac100_setup, + .quirks = stmmac_dwmac1_quirks, + }, { + .gmac = true, + .gmac4 = false, + .min_id = 0, + .desc = NULL, + .dma = &dwmac1000_dma_ops, + .mac = 
&dwmac1000_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .setup = dwmac1000_setup, + .quirks = stmmac_dwmac1_quirks, + }, { + .gmac = false, + .gmac4 = true, + .min_id = 0, + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, + .mac = &dwmac4_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .setup = dwmac4_setup, + .quirks = stmmac_dwmac4_quirks, + }, { + .gmac = false, + .gmac4 = true, + .min_id = DWMAC_CORE_4_00, + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, + .mac = &dwmac410_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = true, + .min_id = DWMAC_CORE_4_10, + .desc = &dwmac4_desc_ops, + .dma = &dwmac410_dma_ops, + .mac = &dwmac410_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = true, + .min_id = DWMAC_CORE_5_10, + .desc = &dwmac4_desc_ops, + .dma = &dwmac410_dma_ops, + .mac = &dwmac510_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .setup = dwmac4_setup, + .quirks = NULL, + } +}; + +int stmmac_hwif_init(struct stmmac_priv *priv) +{ + bool needs_gmac4 = priv->plat->has_gmac4; + bool needs_gmac = priv->plat->has_gmac; + const struct stmmac_hwif_entry *entry; + struct mac_device_info *mac; + int i, ret; + u32 id; + + if (needs_gmac) { + id = stmmac_get_id(priv, GMAC_VERSION); + } else { + id = stmmac_get_id(priv, GMAC4_VERSION); + } + + /* Save ID for later use */ + priv->synopsys_id = id; + + /* Check for HW specific setup first */ + if (priv->plat->setup) { + priv->hw = priv->plat->setup(priv); + if (!priv->hw) + return -ENOMEM; + return 0; + } + + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + if (!mac) + return -ENOMEM; + + /* Fallback to generic HW */ + for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) { + entry = &stmmac_hw[i]; + + if (needs_gmac ^ entry->gmac) + continue; + if (needs_gmac4 ^ entry->gmac4) + continue; + if (id < entry->min_id) + continue; + + mac->desc = entry->desc; + mac->dma = entry->dma; + mac->mac = entry->mac; + mac->ptp = entry->hwtimestamp; + mac->mode = entry->mode; + + priv->hw = mac; + + /* Entry found */ + ret = entry->setup(priv); + if (ret) + return ret; + + /* Run quirks, if needed */ + if (entry->quirks) { + ret = entry->quirks(priv); + if (ret) + return ret; + } + + return 0; + } + + dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n", + id, needs_gmac, needs_gmac4); + return -EINVAL; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index f81ded4a9946..bfad61607f07 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -418,4 +418,21 @@ struct stmmac_mode_ops { #define stmmac_clean_desc3(__priv, __args...) 
\ stmmac_do_void_callback(__priv, mode, clean_desc3, __args) +struct stmmac_priv; + +extern const struct stmmac_ops dwmac100_ops; +extern const struct stmmac_dma_ops dwmac100_dma_ops; +extern const struct stmmac_ops dwmac1000_ops; +extern const struct stmmac_dma_ops dwmac1000_dma_ops; +extern const struct stmmac_ops dwmac4_ops; +extern const struct stmmac_dma_ops dwmac4_dma_ops; +extern const struct stmmac_ops dwmac410_ops; +extern const struct stmmac_dma_ops dwmac410_dma_ops; +extern const struct stmmac_ops dwmac510_ops; + +#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ +#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ + +int stmmac_hwif_init(struct stmmac_priv *priv); + #endif /* __STMMAC_HWIF_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index da50451f8999..2443f20e07bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -130,6 +130,7 @@ struct stmmac_priv { int eee_active; int tx_lpi_timer; unsigned int mode; + unsigned int chain_mode; int extend_desc; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 48b55407a953..0135fd3aa6ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -769,7 +769,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) netdev_info(priv->dev, "IEEE 1588-2008 Advanced Timestamp supported\n"); - priv->hw->ptp = &stmmac_ptp; priv->hwts_tx_en = 0; priv->hwts_rx_en = 0; @@ -2121,32 +2120,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) netdev_info(priv->dev, "No MAC Management Counters available\n"); } -/** - * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors - * @priv: driver private structure - * Description: select the Enhanced/Alternate or Normal descriptors. - * In case of Enhanced/Alternate, it checks if the extended descriptors are - * supported by the HW capability register. - */ -static void stmmac_selec_desc_mode(struct stmmac_priv *priv) -{ - if (priv->plat->enh_desc) { - dev_info(priv->device, "Enhanced/Alternate descriptors\n"); - - /* GMAC older than 3.50 has no extended descriptors */ - if (priv->synopsys_id >= DWMAC_CORE_3_50) { - dev_info(priv->device, "Enabled extended descriptors\n"); - priv->extend_desc = 1; - } else - dev_warn(priv->device, "Extended descriptors not supported\n"); - - priv->hw->desc = &enh_desc_ops; - } else { - dev_info(priv->device, "Normal descriptors\n"); - priv->hw->desc = &ndesc_ops; - } -} - /** * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 
* @priv: driver private structure @@ -4098,49 +4071,17 @@ static void stmmac_service_task(struct work_struct *work) */ static int stmmac_hw_init(struct stmmac_priv *priv) { - struct mac_device_info *mac; - - /* Identify the MAC HW device */ - if (priv->plat->setup) { - mac = priv->plat->setup(priv); - } else if (priv->plat->has_gmac) { - priv->dev->priv_flags |= IFF_UNICAST_FLT; - mac = dwmac1000_setup(priv->ioaddr, - priv->plat->multicast_filter_bins, - priv->plat->unicast_filter_entries, - &priv->synopsys_id); - } else if (priv->plat->has_gmac4) { - priv->dev->priv_flags |= IFF_UNICAST_FLT; - mac = dwmac4_setup(priv->ioaddr, - priv->plat->multicast_filter_bins, - priv->plat->unicast_filter_entries, - &priv->synopsys_id); - } else { - mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id); - } - if (!mac) - return -ENOMEM; - - priv->hw = mac; + int ret; /* dwmac-sun8i only work in chain mode */ if (priv->plat->has_sun8i) chain_mode = 1; + priv->chain_mode = chain_mode; - /* To use the chained or ring mode */ - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - priv->hw->mode = &dwmac4_ring_mode_ops; - } else { - if (chain_mode) { - priv->hw->mode = &chain_mode_ops; - dev_info(priv->device, "Chain mode enabled\n"); - priv->mode = STMMAC_CHAIN_MODE; - } else { - priv->hw->mode = &ring_mode_ops; - dev_info(priv->device, "Ring mode enabled\n"); - priv->mode = STMMAC_RING_MODE; - } - } + /* Initialize HW Interface */ + ret = stmmac_hwif_init(priv); + if (ret) + return ret; /* Get the HW capability (new GMAC newer than 3.50a) */ priv->hw_cap_support = stmmac_get_hw_features(priv); @@ -4174,12 +4115,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv) dev_info(priv->device, "No HW DMA feature register supported\n"); } - /* To use alternate (extended), normal or GMAC4 descriptor structures */ - if (priv->synopsys_id >= DWMAC_CORE_4_00) - priv->hw->desc = &dwmac4_desc_ops; - else - stmmac_selec_desc_mode(priv); - if (priv->plat->rx_coe) { priv->hw->rx_csum = priv->plat->rx_coe; dev_info(priv->device, "RX Checksum Offload Engine supported\n"); -- cgit v1.2.3 From b60bfdfec5b8ec88552e75c8bd99f1ebfa66a6e0 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Mon, 23 Apr 2018 14:56:04 +0300 Subject: qed: Delete unused parameter p_ptt from mcp APIs Since nvm images attributes are cached during driver load, acquiring ptt is not needed when calling qed_mcp_get_nvm_image(). Signed-off-by: Denis Bolotin Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 9 +-------- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 4 +--- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 2 -- 3 files changed, 2 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9854aa9139af..d1d3787affe8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1894,15 +1894,8 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, u8 *buf, u16 len) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_ptt *ptt = qed_ptt_acquire(hwfn); - int rc; - - if (!ptt) - return -EAGAIN; - rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len); - qed_ptt_release(hwfn, ptt); - return rc; + return qed_mcp_get_nvm_image(hwfn, type, buf, len); } static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index ec0d425766a7..1377ad172fb3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2531,7 +2531,6 @@ err0: static int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, enum qed_nvm_images image_id, struct qed_nvm_image_att *p_image_att) { @@ -2569,7 +2568,6 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, } int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len) { @@ -2578,7 +2576,7 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, memset(p_buffer, 0, buffer_len); - rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att); + rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); if (rc) return rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 8a5c988d0c3c..dd62c38b3bd3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -486,7 +486,6 @@ struct qed_nvm_image_att { * @brief Allows reading a whole nvram image * * @param p_hwfn - * @param p_ptt * @param image_id - image requested for reading * @param p_buffer - allocated buffer into which to fill data * @param buffer_len - length of the allocated buffer. @@ -494,7 +493,6 @@ struct qed_nvm_image_att { * @return 0 iff p_buffer now contains the nvram image. */ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len); -- cgit v1.2.3 From 1ac4329a1cff2e0bb12b71c13ad53a0e05bc87a6 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Mon, 23 Apr 2018 14:56:05 +0300 Subject: qed: Add configuration information to register dump and debug data Configuration information is added to the debug data collection, in addition to register dump. Added qed_dbg_nvm_image() that receives an image type, allocates a buffer and reads the image. The images are saved in the buffers and the dump size is updated. Signed-off-by: Denis Bolotin Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_debug.c | 113 +++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 14 +++- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 14 ++++ include/linux/qed/qed_if.h | 3 + 4 files changed, 139 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 4926c5532fba..b3211c7d38c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -7778,6 +7778,57 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev) return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO); } +int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, u32 *length) +{ + struct qed_nvm_image_att image_att; + int rc; + + *length = 0; + rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); + if (rc) + return rc; + + *length = image_att.length; + + return rc; +} + +int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes, enum qed_nvm_images image_id) +{ + struct qed_hwfn *p_hwfn = + &cdev->hwfns[cdev->dbg_params.engine_for_debug]; + u32 len_rounded, i; + __be32 val; + int rc; + + *num_dumped_bytes = 0; + rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded); + if (rc) + return rc; + + DP_NOTICE(p_hwfn->cdev, + "Collecting a debug feature [\"nvram image %d\"]\n", + image_id); + + len_rounded = roundup(len_rounded, sizeof(u32)); + rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded); + if (rc) + return rc; + + /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */ + if (image_id != QED_NVM_IMAGE_NVM_META) + for (i = 0; i < len_rounded; i += 4) { + val = cpu_to_be32(*(u32 *)(buffer + i)); + *(u32 *)(buffer + i) = val; + } + + *num_dumped_bytes = len_rounded; + + return rc; +} + int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { @@ -7831,6 +7882,9 @@ enum debug_print_features { IGU_FIFO = 6, PHY = 7, FW_ASSERTS = 8, + NVM_CFG1 = 9, + DEFAULT_CFG = 10, + NVM_META = 11, }; static u32 qed_calc_regdump_header(enum debug_print_features feature, @@ -7965,13 +8019,61 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) DP_ERR(cdev, "qed_dbg_mcp_trace failed. 
rc = %d\n", rc); } + /* nvm cfg1 */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, + &feature_size, QED_NVM_IMAGE_NVM_CFG1); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(NVM_CFG1, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc); + } + + /* nvm default */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, + &feature_size, QED_NVM_IMAGE_DEFAULT_CFG); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(DEFAULT_CFG, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG", + rc); + } + + /* nvm meta */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, + &feature_size, QED_NVM_IMAGE_NVM_META); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(NVM_META, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc); + } + return 0; } int qed_dbg_all_data_size(struct qed_dev *cdev) { + struct qed_hwfn *p_hwfn = + &cdev->hwfns[cdev->dbg_params.engine_for_debug]; + u32 regs_len = 0, image_len = 0; u8 cur_engine, org_engine; - u32 regs_len = 0; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { @@ -7993,6 +8095,15 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) /* Engine common */ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev); + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; return regs_len; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 1377ad172fb3..0550f0ee11b3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2529,7 +2529,7 @@ err0: return rc; } -static int +int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, struct qed_nvm_image_att *p_image_att) @@ -2545,6 +2545,15 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, case QED_NVM_IMAGE_FCOE_CFG: type = NVM_TYPE_FCOE_CFG; break; + case QED_NVM_IMAGE_NVM_CFG1: + type = NVM_TYPE_NVM_CFG1; + break; + case QED_NVM_IMAGE_DEFAULT_CFG: + type = NVM_TYPE_DEFAULT_CFG; + break; + case QED_NVM_IMAGE_NVM_META: + type = NVM_TYPE_META; + break; default: DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", image_id); @@ -2588,9 +2597,6 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, return -EINVAL; } - /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */ - image_att.length -= 4; - if (image_att.length > buffer_len) { DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h 
b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index dd62c38b3bd3..3af3896420b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -482,6 +482,20 @@ struct qed_nvm_image_att { u32 length; }; +/** + * @brief Allows reading a whole nvram image + * + * @param p_hwfn + * @param image_id - image to get attributes for + * @param p_image_att - image attributes structure into which to fill data + * + * @return int - 0 - operation was successful. + */ +int +qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, + struct qed_nvm_image_att *p_image_att); + /** * @brief Allows reading a whole nvram image * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b5b2bc9eacca..e53f9c7c2809 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -159,6 +159,9 @@ struct qed_dcbx_get { enum qed_nvm_images { QED_NVM_IMAGE_ISCSI_CFG, QED_NVM_IMAGE_FCOE_CFG, + QED_NVM_IMAGE_NVM_CFG1, + QED_NVM_IMAGE_DEFAULT_CFG, + QED_NVM_IMAGE_NVM_META, }; struct qed_link_eee_params { -- cgit v1.2.3 From cea19a6ce8bf0518d156beea419d822021cc3705 Mon Sep 17 00:00:00 2001 From: Carl Huang Date: Thu, 19 Apr 2018 19:39:38 +0300 Subject: ath10k: add WMI_SERVICE_AVAILABLE_EVENT support Add WMI_SERVICE_AVAILABLE_EVENT to extend WMI_SERVICE_READY_EVENT, the 128bit service map in WMI_SERVICE_READY_EVENT is not enough for firmware to notice new WLAN service to host driver. Hereby, for thoese new WLAN service, firmware will notice host driver by WMI_SERVICE_AVAILABLE_EVENT. Signed-off-by: Alan Liu Signed-off-by: Carl Huang Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi-ops.h | 24 +++ drivers/net/wireless/ath/ath10k/wmi-tlv.c | 38 ++++ drivers/net/wireless/ath/ath10k/wmi-tlv.h | 346 +++++++++++++++++++++++++++++- drivers/net/wireless/ath/ath10k/wmi.c | 21 +- drivers/net/wireless/ath/ath10k/wmi.h | 8 + 5 files changed, 431 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index c35e45340b4f..4f7e0ba071a2 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -25,6 +25,7 @@ struct sk_buff; struct wmi_ops { void (*rx)(struct ath10k *ar, struct sk_buff *skb); void (*map_svc)(const __le32 *in, unsigned long *out, size_t len); + void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len); int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb, struct wmi_scan_ev_arg *arg); @@ -54,6 +55,9 @@ struct wmi_ops { struct wmi_wow_ev_arg *arg); int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb, struct wmi_echo_ev_arg *arg); + int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_svc_avail_ev_arg *arg); + enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar); struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt); @@ -229,6 +233,17 @@ ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out, return 0; } +static inline int +ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out, + size_t len) +{ + if (!ar->wmi.ops->map_svc_ext) + return -EOPNOTSUPP; + + ar->wmi.ops->map_svc_ext(in, out, len); + return 0; +} + static inline int ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb, struct wmi_scan_ev_arg *arg) @@ -329,6 +344,15 @@ ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb, return ar->wmi.ops->pull_rdy(ar, skb, arg); } +static inline int 
+ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb, + struct wmi_svc_avail_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_svc_avail) + return -EOPNOTSUPP; + return ar->wmi.ops->pull_svc_avail(ar, skb, arg); +} + static inline int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, struct ath10k_fw_stats *stats) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 9d1b0a459069..31866e3b74fc 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -594,6 +594,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_TLV_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; + case WMI_TLV_SERVICE_AVAILABLE_EVENTID: + ath10k_wmi_event_service_available(ar, skb); + break; case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID: ath10k_wmi_tlv_event_bcn_tx_status(ar, skb); break; @@ -1117,6 +1120,39 @@ static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar, return 0; } +static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_svc_avail_ev_arg *arg = data; + + switch (tag) { + case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT: + arg->service_map_ext_len = *(__le32 *)ptr; + arg->service_map_ext = ptr + sizeof(__le32); + return 0; + default: + break; + } + return -EPROTO; +} + +static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_svc_avail_ev_arg *arg) +{ + int ret; + + ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, + ath10k_wmi_tlv_svc_avail_parse, arg); + + if (ret) { + ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret); + return ret; + } + + return 0; +} + static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src, struct ath10k_fw_stats_vdev *dst) { @@ -3740,6 +3776,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { static const struct wmi_ops wmi_tlv_ops = { .rx = ath10k_wmi_tlv_op_rx, .map_svc = wmi_tlv_svc_map, + .map_svc_ext = wmi_tlv_svc_map_ext, .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev, .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev, @@ -3751,6 +3788,7 @@ static const struct wmi_ops wmi_tlv_ops = { .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev, .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev, + .pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail, .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats, .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev, .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev, diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index fa3773ec7c68..529b91b17061 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -295,6 +295,7 @@ enum wmi_tlv_cmd_id { enum wmi_tlv_event_id { WMI_TLV_SERVICE_READY_EVENTID = 0x1, WMI_TLV_READY_EVENTID, + WMI_TLV_SERVICE_AVAILABLE_EVENTID, WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN), WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV), WMI_TLV_CHAN_INFO_EVENTID, @@ -949,6 +950,275 @@ enum wmi_tlv_tag { WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE, WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD, WMI_TLV_TAG_STRUCT_MGMT_TX_CMD, + WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT, + WMI_TLV_TAG_STRUCT_SOC_SET_ANTENNA_MODE_CMD, + WMI_TLV_TAG_STRUCT_WOW_UDP_SVC_OFLD_CMD, + WMI_TLV_TAG_STRUCT_LRO_INFO_CMD, + WMI_TLV_TAG_STRUCT_ROAM_EARLYSTOP_RSSI_THRES_PARAM, + WMI_TLV_TAG_STRUCT_SERVICE_READY_EXT_EVENT, 
+ WMI_TLV_TAG_STRUCT_MAWC_SENSOR_REPORT_IND_CMD, + WMI_TLV_TAG_STRUCT_MAWC_ENABLE_SENSOR_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_CONFIGURE_MAWC_CMD, + WMI_TLV_TAG_STRUCT_NLO_CONFIGURE_MAWC_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_MAWC_CMD, + WMI_TLV_TAG_STRUCT_PEER_ASSOC_CONF_EVENT, + WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_AP_PS_EGAP_PARAM_CMD, + WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PMF_OFFLOAD_SET_SA_QUERY_CMD, + WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_CMD, + WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_SCPC_EVENT, + WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_CHAINMASK_LIST, + WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_BPF_GET_CAPABILITY_CMD, + WMI_TLV_TAG_STRUCT_BPF_CAPABILITY_INFO_EVT, + WMI_TLV_TAG_STRUCT_BPF_GET_VDEV_STATS_CMD, + WMI_TLV_TAG_STRUCT_BPF_VDEV_STATS_INFO_EVT, + WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_INSTRUCTIONS_CMD, + WMI_TLV_TAG_STRUCT_BPF_DEL_VDEV_INSTRUCTIONS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_DELETE_RESP_EVENT, + WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_DENSE_THRES_PARAM, + WMI_TLV_TAG_STRUCT_ENLO_CANDIDATE_SCORE_PARAM, + WMI_TLV_TAG_STRUCT_PEER_UPDATE_WDS_ENTRY_CMD, + WMI_TLV_TAG_STRUCT_VDEV_CONFIG_RATEMASK, + WMI_TLV_TAG_STRUCT_PDEV_FIPS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_CMD, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_SWITCH_TBL_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_CTL_TABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_MIMOGAIN_TABLE_CMD, + WMI_TLV_TAG_STRUCT_FWTEST_SET_PARAM_CMD, + WMI_TLV_TAG_STRUCT_PEER_ATF_REQUEST, + WMI_TLV_TAG_STRUCT_VDEV_ATF_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_CCK_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_OFDM_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_INST_RSSI_STATS_RESP, + WMI_TLV_TAG_STRUCT_MED_UTIL_REPORT_EVENT, + WMI_TLV_TAG_STRUCT_PEER_STA_PS_STATECHANGE_EVENT, + WMI_TLV_TAG_STRUCT_WDS_ADDR_EVENT, + WMI_TLV_TAG_STRUCT_PEER_RATECODE_LIST_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_TPC_EVENT, + WMI_TLV_TAG_STRUCT_ANI_OFDM_EVENT, + WMI_TLV_TAG_STRUCT_ANI_CCK_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_CHANNEL_HOPPING_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_FIPS_EVENT, + WMI_TLV_TAG_STRUCT_ATF_PEER_INFO, + WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CMD, + WMI_TLV_TAG_STRUCT_VDEV_FILTER_NRP_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_QBOOST_CFG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_GPIO_HANDLE, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_CTRL_CHAIN, + WMI_TLV_TAG_STRUCT_PEER_CCK_OFDM_RATE_INFO, + WMI_TLV_TAG_STRUCT_PEER_MCS_RATE_INFO, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM, + WMI_TLV_TAG_STRUCT_MU_REPORT_TOTAL_MU, + WMI_TLV_TAG_STRUCT_VDEV_SET_DSCP_TID_MAP_CMD, + WMI_TLV_TAG_STRUCT_ROAM_SET_MBO, + WMI_TLV_TAG_STRUCT_MIB_STATS_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_CREATED_EVENT, + WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_DELETED_EVENT, + WMI_TLV_TAG_STRUCT_NAN_STARTED_CLUSTER_EVENT, + WMI_TLV_TAG_STRUCT_NAN_JOINED_CLUSTER_EVENT, + WMI_TLV_TAG_STRUCT_NDI_GET_CAP_REQ, + WMI_TLV_TAG_STRUCT_NDP_INITIATOR_REQ, + 
WMI_TLV_TAG_STRUCT_NDP_RESPONDER_REQ, + WMI_TLV_TAG_STRUCT_NDP_END_REQ, + WMI_TLV_TAG_STRUCT_NDI_CAP_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_INITIATOR_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_RESPONDER_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_END_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_INDICATION_EVENT, + WMI_TLV_TAG_STRUCT_NDP_CONFIRM_EVENT, + WMI_TLV_TAG_STRUCT_NDP_END_INDICATION_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_SET_QUIET_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_PCL_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_MODE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_HW_MODE_TRANSITION_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY, + WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_COEX_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_CONFIG_ENHANCED_MCAST_FILTER, + WMI_TLV_TAG_STRUCT_CHAN_AVOID_RPT_ALLOW_CMD, + WMI_TLV_TAG_STRUCT_SET_PERIODIC_CHANNEL_STATS_CONFIG, + WMI_TLV_TAG_STRUCT_VDEV_SET_CUSTOM_AGGR_SIZE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_WAL_POWER_DEBUG_CMD, + WMI_TLV_TAG_STRUCT_MAC_PHY_CAPABILITIES, + WMI_TLV_TAG_STRUCT_HW_MODE_CAPABILITIES, + WMI_TLV_TAG_STRUCT_SOC_MAC_PHY_HW_MODE_CAPS, + WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES_EXT, + WMI_TLV_TAG_STRUCT_SOC_HAL_REG_CAPABILITIES, + WMI_TLV_TAG_STRUCT_VDEV_WISA_CMD, + WMI_TLV_TAG_STRUCT_TX_POWER_LEVEL_STATS_EVT, + WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV, + WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_CONFIG, + WMI_TLV_TAG_STRUCT_WOW_SET_ACTION_WAKE_UP_CMD, + WMI_TLV_TAG_STRUCT_NDP_END_RSP_PER_NDI, + WMI_TLV_TAG_STRUCT_PEER_BWF_REQUEST, + WMI_TLV_TAG_STRUCT_BWF_PEER_INFO, + WMI_TLV_TAG_STRUCT_DBGLOG_TIME_STAMP_SYNC_CMD, + WMI_TLV_TAG_STRUCT_RMC_SET_LEADER_CMD, + WMI_TLV_TAG_STRUCT_RMC_MANUAL_LEADER_EVENT, + WMI_TLV_TAG_STRUCT_PER_CHAIN_RSSI_STATS, + WMI_TLV_TAG_STRUCT_RSSI_STATS, + WMI_TLV_TAG_STRUCT_P2P_LO_START_CMD, + WMI_TLV_TAG_STRUCT_P2P_LO_STOP_CMD, + WMI_TLV_TAG_STRUCT_P2P_LO_STOPPED_EVENT, + WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_SETUP_CMD, + WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_REMOVE_CMD, + WMI_TLV_TAG_STRUCT_SET_MULTIPLE_MCAST_FILTER_CMD, + WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT, + WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_CMD, + WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_REORDER_TIMEOUT_VAL_CMD, + WMI_TLV_TAG_STRUCT_PEER_SET_RX_BLOCKSIZE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_WAKEUP_CONFIG_CMDID, + WMI_TLV_TAG_STRUCT_TLV_BUF_LEN_PARAM, + WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT, + WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_REQ_CMD, + WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO, + WMI_TLV_TAG_STRUCT_PDEV_GET_ANTDIV_STATUS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_ANTDIV_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_MNT_FILTER_CMD, + WMI_TLV_TAG_STRUCT_GET_CHIP_POWER_STATS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CHIP_POWER_STATS_EVENT, + WMI_TLV_TAG_STRUCT_COEX_GET_ANTENNA_ISOLATION_CMD, + WMI_TLV_TAG_STRUCT_COEX_REPORT_ISOLATION_EVENT, + WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS, + WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS, + WMI_TLV_TAG_STRUCT_TX_STATS, + WMI_TLV_TAG_STRUCT_PEER_AC_TX_STATS, + WMI_TLV_TAG_STRUCT_RX_STATS, + WMI_TLV_TAG_STRUCT_PEER_AC_RX_STATS, + WMI_TLV_TAG_STRUCT_REPORT_STATS_EVENT, + WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS_THRESH, + WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS_THRESH, + WMI_TLV_TAG_STRUCT_TX_STATS_THRESH, + WMI_TLV_TAG_STRUCT_RX_STATS_THRESH, + WMI_TLV_TAG_STRUCT_PDEV_SET_STATS_THRESHOLD_CMD, + WMI_TLV_TAG_STRUCT_REQUEST_WLAN_STATS_CMD, + 
WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_EVENT, + WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_INFO, + WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_BAND_TO_MAC, + WMI_TLV_TAG_STRUCT_TBTT_OFFSET_INFO, + WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EXT_EVENT, + WMI_TLV_TAG_STRUCT_SAR_LIMITS_CMD, + WMI_TLV_TAG_STRUCT_SAR_LIMIT_CMD_ROW, + WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ADFS_CH_CFG_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_ABORT_CMD, + WMI_TLV_TAG_STRUCT_PDEV_DFS_RADAR_DETECTION_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_DFS_CAC_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_VENDOR_OUI, + WMI_TLV_TAG_STRUCT_REQUEST_RCPI_CMD, + WMI_TLV_TAG_STRUCT_UPDATE_RCPI_EVENT, + WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD, + WMI_TLV_TAG_STRUCT_PEER_STATS_INFO, + WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PKGID_EVENT, + WMI_TLV_TAG_STRUCT_CONNECTED_NLO_RSSI_PARAMS, + WMI_TLV_TAG_STRUCT_SET_CURRENT_COUNTRY_CMD, + WMI_TLV_TAG_STRUCT_REGULATORY_RULE_STRUCT, + WMI_TLV_TAG_STRUCT_REG_CHAN_LIST_CC_EVENT, + WMI_TLV_TAG_STRUCT_11D_SCAN_START_CMD, + WMI_TLV_TAG_STRUCT_11D_SCAN_STOP_CMD, + WMI_TLV_TAG_STRUCT_11D_NEW_COUNTRY_EVENT, + WMI_TLV_TAG_STRUCT_REQUEST_RADIO_CHAN_STATS_CMD, + WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS, + WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_PER_CONFIG, + WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_ACTIVE_MODE_CMD, + WMI_TLV_TAG_STRUCT_HW_DATA_FILTER_CMD, + WMI_TLV_TAG_STRUCT_CONNECTED_NLO_BSS_BAND_RSSI_PREF, + WMI_TLV_TAG_STRUCT_PEER_OPER_MODE_CHANGE_EVENT, + WMI_TLV_TAG_STRUCT_CHIP_POWER_SAVE_FAILURE_DETECTED, + WMI_TLV_TAG_STRUCT_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PKT_ROUTING_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_DIVERSITY_GAIN_CMD, + WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_COMBO, + WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_CAPABILITY, + WMI_TLV_TAG_STRUCT_VDEV_SET_ARP_STATS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_EVENT, + WMI_TLV_TAG_STRUCT_IFACE_OFFLOAD_STATS, + WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD_SUB_STRUCT_PARAM, + WMI_TLV_TAG_STRUCT_RSSI_CTL_EXT, + WMI_TLV_TAG_STRUCT_SINGLE_PHYERR_EXT_RX_HDR, + WMI_TLV_TAG_STRUCT_COEX_BT_ACTIVITY_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_GET_TX_POWER_CMD, + WMI_TLV_TAG_STRUCT_VDEV_TX_POWER_EVENT, + WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_COMPL_EVENT, + WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_SEND_CMD, + WMI_TLV_TAG_STRUCT_TX_SEND_PARAMS, + WMI_TLV_TAG_STRUCT_HE_RATE_SET, + WMI_TLV_TAG_STRUCT_CONGESTION_STATS, + WMI_TLV_TAG_STRUCT_SET_INIT_COUNTRY_CMD, + WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE, + WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE_PARAM_TLV, + WMI_TLV_TAG_STRUCT_PDEV_DIV_GET_RSSI_ANTID, + WMI_TLV_TAG_STRUCT_THERM_THROT_CONFIG_REQUEST, + WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_CONFIG_INFO, + WMI_TLV_TAG_STRUCT_THERM_THROT_STATS_EVENT, + WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_STATS_INFO, + WMI_TLV_TAG_STRUCT_PDEV_DIV_RSSI_ANTID_EVENT, + WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CAPABILITIES, + WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_REQ, + WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_RSP, + 
WMI_TLV_TAG_STRUCT_OEM_INDIRECT_DATA, + WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE, + WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE_ENTRY, + WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_LCA_DISALLOW_CONFIG_TLV_PARAM, + WMI_TLV_TAG_STRUCT_VDEV_LIMIT_OFFCHAN_CMD, + WMI_TLV_TAG_STRUCT_ROAM_RSSI_REJECTION_OCE_CONFIG_PARAM, + WMI_TLV_TAG_STRUCT_UNIT_TEST_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_FILS_OFFLOAD_TLV_PARAM, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PMK_CACHE_CMD, + WMI_TLV_TAG_STRUCT_PMK_CACHE, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_FILS_HLP_PKT_CMD, + WMI_TLV_TAG_STRUCT_ROAM_FILS_SYNCH_TLV_PARAM, + WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_EXTENDED_TLV_PARAM, + WMI_TLV_TAG_STRUCT_ROAM_BG_SCAN_ROAMING_PARAM, + WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_PARAMS_CMD, + WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_SET_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_OIC_PING_HANDOFF_EVENT, + WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_OFFLOAD_CMD, + WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_EVENT, + WMI_TLV_TAG_STRUCT_BTM_CONFIG, + WMI_TLV_TAG_STRUCT_DEBUG_MESG_FW_DATA_STALL_PARAM, + WMI_TLV_TAG_STRUCT_WLM_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_CND_SCORING_PARAM, + WMI_TLV_TAG_STRUCT_PDEV_CONFIG_VENDOR_OUI_ACTION, + WMI_TLV_TAG_STRUCT_VENDOR_OUI_EXT, + WMI_TLV_TAG_STRUCT_ROAM_SYNCH_FRAME_EVENT, + WMI_TLV_TAG_STRUCT_FD_SEND_FROM_HOST_CMD, + WMI_TLV_TAG_STRUCT_ENABLE_FILS_CMD, + WMI_TLV_TAG_STRUCT_HOST_SWFDA_EVENT, WMI_TLV_TAG_MAX }; @@ -1068,16 +1338,74 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_WLAN_STATS_REPORT, WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT, WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD, + WMI_TLV_SERVICE_RCPI_SUPPORT, + WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT, + WMI_TLV_SERVICE_PEER_STATS_INFO, + WMI_TLV_SERVICE_REGULATORY_DB, + WMI_TLV_SERVICE_11D_OFFLOAD, + WMI_TLV_SERVICE_HW_DATA_FILTERING, + WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART, + WMI_TLV_SERVICE_PKT_ROUTING, + WMI_TLV_SERVICE_CHECK_CAL_VERSION, + WMI_TLV_SERVICE_OFFCHAN_TX_WMI, + WMI_TLV_SERVICE_8SS_TX_BFEE, + WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_TLV_SERVICE_ACK_TIMEOUT, + WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64, + WMI_TLV_MAX_SERVICE = 128, + +/* NOTE: + * The above service flags are delivered in the wmi_service_bitmap field + * of the WMI_TLV_SERVICE_READY_EVENT message. + * The below service flags are delivered in a WMI_TLV_SERVICE_AVAILABLE_EVENT + * message rather than in the WMI_TLV_SERVICE_READY_EVENT message's + * wmi_service_bitmap field. + * The WMI_TLV_SERVICE_AVAILABLE_EVENT message immediately precedes the + * WMI_TLV_SERVICE_READY_EVENT message. 
+ */ + + WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128, + WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT, + WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT, + WMI_TLV_SERVICE_FILS_SUPPORT, + WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD, + WMI_TLV_SERVICE_WLAN_DHCP_RENEW, + WMI_TLV_SERVICE_MAWC_SUPPORT, + WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG, + WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT, + WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT, + WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT, + WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT, + WMI_TLV_SERVICE_THERM_THROT, + WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT, + WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN, + WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143, + WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144, + WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145, + WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146, + WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147, + WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148, + WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149, + WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150, + WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151, + WMI_TLV_SERVICE_STA_TWT = 152, + WMI_TLV_SERVICE_AP_TWT = 153, + WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154, + WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155, + + WMI_TLV_MAX_EXT_SERVICE = 256, }; -#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ - ((svc_id) < (len) && \ - __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \ - BIT((svc_id) % (sizeof(u32)))) +#define WMI_TLV_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ + ((svc_id) < (WMI_TLV_MAX_EXT_SERVICE) && \ + (svc_id) >= (len) && \ + __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 32]) & \ + BIT(((((svc_id) - (len)) % 32) & 0x1f))) #define SVCMAP(x, y, len) \ do { \ - if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \ + if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \ + (WMI_TLV_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \ __set_bit(y, out); \ } while (0) @@ -1228,6 +1556,14 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len) WMI_SERVICE_MGMT_TX_WMI, len); } +static inline void +wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len) +{ + SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, + WMI_SERVICE_SPOOF_MAC_SUPPORT, + WMI_TLV_MAX_SERVICE); +} + #undef SVCMAP struct wmi_tlv { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index e12dd28fcf5d..fa07ef69122b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -5059,7 +5059,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) return; } - memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map, arg.service_map_len); @@ -5269,6 +5268,21 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) return 0; } +void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb) +{ + int ret; + struct wmi_svc_avail_ev_arg arg = {}; + + ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse servive available event: %d\n", + ret); + } + + ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, + __le32_to_cpu(arg.service_map_ext_len)); +} + static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb) { const struct wmi_pdev_temperature_event *ev; @@ -5465,6 +5479,9 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_ready(ar, skb); ath10k_wmi_queue_set_coverage_class_work(ar); break; + case 
WMI_SERVICE_AVAILABLE_EVENTID: + ath10k_wmi_event_service_available(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -5880,6 +5897,8 @@ int ath10k_wmi_connect(struct ath10k *ar) struct ath10k_htc_svc_conn_req conn_req; struct ath10k_htc_svc_conn_resp conn_resp; + memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); + memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 3cc129d812a4..7e2f24fdcf1c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -202,6 +202,7 @@ enum wmi_service { WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, WMI_SERVICE_TPC_STATS_FINAL, WMI_SERVICE_RESET_CHIP, + WMI_SERVICE_SPOOF_MAC_SUPPORT, /* keep last */ WMI_SERVICE_MAX, @@ -1190,6 +1191,7 @@ enum wmi_cmd_id { enum wmi_event_id { WMI_SERVICE_READY_EVENTID = 0x1, WMI_READY_EVENTID, + WMI_SERVICE_AVAILABLE_EVENTID, /* Scan specific events */ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN), @@ -6639,6 +6641,11 @@ struct wmi_svc_rdy_ev_arg { const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS]; }; +struct wmi_svc_avail_ev_arg { + __le32 service_map_ext_len; + const __le32 *service_map_ext; +}; + struct wmi_rdy_ev_arg { __le32 sw_version; __le32 abi_version; @@ -7059,6 +7066,7 @@ void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf, int left_len, struct wmi_phyerr_ev_arg *arg); void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar, -- cgit v1.2.3 From 60e1d0fb290197fe505dff6e4e3b7e4d258dbf60 Mon Sep 17 00:00:00 2001 From: Carl Huang Date: Thu, 19 Apr 2018 19:39:40 +0300 Subject: ath10k: support MAC address randomization in scan The ath10k driver reports the random_mac_addr capability to the upper layer based on the service bit reported by the firmware. The driver sets the spoofed flag in scan_ctrl_flags towards the firmware if the upper layer has enabled this feature in the scan request. Tested with QCA6174 hw3.0 and firmware-6.bin_WLAN.RM.4.4.1-00102-QCARMSWP-1, but QCA9377 is also affected.
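For illustration only (not part of this patch): the probe request OUI sent to firmware via the new scan_prob_req_oui command is simply the first three octets of the device MAC address packed into a 32-bit value, as the wmi-ops.h hunk below does. A minimal sketch of that packing, with a hypothetical helper name:

static u32 example_scan_prob_req_oui(const u8 mac_addr[ETH_ALEN])
{
	/* Pack the OUI (first three octets of the device MAC) into the
	 * low 24 bits, e.g. 00:03:7f:aa:bb:cc -> 0x00037f.
	 */
	return (((u32)mac_addr[0]) << 16) |
	       (((u32)mac_addr[1]) << 8) |
	       mac_addr[2];
}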
Signed-off-by: Carl Huang Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 17 +++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi-ops.h | 22 ++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi-tlv.c | 25 +++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi-tlv.h | 11 +++++++++++ drivers/net/wireless/ath/ath10k/wmi.c | 5 +++++ drivers/net/wireless/ath/ath10k/wmi.h | 9 +++++++++ 6 files changed, 89 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index fc3320f94f45..c71cf5b81385 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -5717,6 +5717,12 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; } + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; + ether_addr_copy(arg.mac_addr.addr, req->mac_addr); + ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); + } + if (req->n_channels) { arg.n_channels = req->n_channels; for (i = 0; i < arg.n_channels; i++) @@ -8433,6 +8439,17 @@ int ath10k_mac_register(struct ath10k *ar) goto err_dfs_detector_exit; } + if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { + ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr); + if (ret) { + ath10k_err(ar, "failed to set prob req oui: %i\n", ret); + goto err_dfs_detector_exit; + } + + ar->hw->wiphy->features |= + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + } + ar->hw->wiphy->cipher_suites = cipher_suites; /* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128 diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 4f7e0ba071a2..e37d16b31afe 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -119,6 +119,8 @@ struct wmi_ops { u32 value); struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar, const struct wmi_scan_chan_list_arg *arg); + struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar, + u32 prob_req_oui); struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id, const void *bcn, size_t bcn_len, u32 bcn_paddr, bool dtim_zero, @@ -914,6 +916,26 @@ ath10k_wmi_scan_chan_list(struct ath10k *ar, return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); } +static inline int +ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN]) +{ + struct sk_buff *skb; + u32 prob_req_oui; + + prob_req_oui = (((u32)mac_addr[0]) << 16) | + (((u32)mac_addr[1]) << 8) | mac_addr[2]; + + if (!ar->wmi.ops->gen_scan_prob_req_oui) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->scan_prob_req_oui_cmdid); +} + static inline int ath10k_wmi_peer_assoc(struct ath10k *ar, const struct wmi_peer_assoc_complete_arg *arg) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 31866e3b74fc..01f4eb201330 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1636,6 +1636,8 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar, cmd->num_bssids = __cpu_to_le32(arg->n_bssids); cmd->ie_len = __cpu_to_le32(arg->ie_len); cmd->num_probes = __cpu_to_le32(3); + ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr); + ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr); 
/* FIXME: There are some scan flag inconsistencies across firmwares, * e.g. WMI-TLV inverts the logic behind the following flag. @@ -2482,6 +2484,27 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar, return skb; } +static struct sk_buff * +ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui) +{ + struct wmi_scan_prob_req_oui_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->prob_req_oui = __cpu_to_le32(prob_req_oui); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n"); + return skb; +} + static struct sk_buff * ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn, size_t bcn_len, @@ -3452,6 +3475,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = { .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID, + .scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID, .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID, @@ -3820,6 +3844,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps, .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps, .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list, + .gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui, .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma, .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm, .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats, diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index 529b91b17061..954c50bb3f66 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1706,6 +1706,15 @@ struct wmi_tlv_scan_chan_list_cmd { __le32 num_scan_chans; } __packed; +struct wmi_scan_prob_req_oui_cmd { +/* OUI to be used in Probe Request frame when random MAC address is + * requested part of scan parameters. This is applied to both FW internal + * scans and host initiated scans. Host can request for random MAC address + * with WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ flag. 
+ */ + __le32 prob_req_oui; +} __packed; + struct wmi_tlv_start_scan_cmd { struct wmi_start_scan_common common; __le32 burst_duration_ms; @@ -1714,6 +1723,8 @@ struct wmi_tlv_start_scan_cmd { __le32 num_ssids; __le32 ie_len; __le32 num_probes; + struct wmi_mac_addr mac_addr; + struct wmi_mac_addr mac_mask; } __packed; struct wmi_tlv_vdev_start_cmd { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index fa07ef69122b..df2e92a6c9bd 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -42,6 +42,7 @@ static struct wmi_cmd_map wmi_cmd_map = { .stop_scan_cmdid = WMI_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID, @@ -207,6 +208,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = { .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID, @@ -374,6 +376,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = { .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID, @@ -541,6 +544,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = { .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID, @@ -1338,6 +1342,7 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = { .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 7e2f24fdcf1c..2705973e39c6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -791,6 +791,7 @@ struct wmi_cmd_map { u32 stop_scan_cmdid; u32 scan_chan_list_cmdid; u32 scan_sch_prio_tbl_cmdid; + u32 scan_prob_req_oui_cmdid; u32 pdev_set_regdomain_cmdid; u32 pdev_set_channel_cmdid; u32 pdev_set_param_cmdid; @@ -3168,6 +3169,8 @@ struct wmi_start_scan_arg { u16 channels[64]; struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; + struct wmi_mac_addr mac_addr; + struct wmi_mac_addr mac_mask; }; /* scan control flags */ @@ -3191,6 +3194,12 @@ struct wmi_start_scan_arg { */ #define 
WMI_SCAN_CONTINUE_ON_ERROR 0x80 +/* Use random MAC address for TA for Probe Request frame and add + * OUI specified by WMI_SCAN_PROB_REQ_OUI_CMDID to the Probe Request frame. + * if OUI is not set by WMI_SCAN_PROB_REQ_OUI_CMDID then the flag is ignored. + */ +#define WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ 0x1000 + /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */ #define WMI_SCAN_CLASS_MASK 0xFF000000 -- cgit v1.2.3 From fa3440fa2fa1531fb2aafb6d51f2bc5ac70a6247 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Thu, 19 Apr 2018 19:40:07 +0300 Subject: ath10k: convert wow pattern from 802.3 to 802.11 When trying to set wow wakeup patterns, it fails with this command: iw phyxx wowlan enable patterns offset xx+ IP address xx.xx.xx.xx The reason is that the wow pattern from the upper layer is in 802.3 format in this case, and it needs to be converted to 802.11 format. The input offset parameter is given for 802.3, but the actual offset the firmware needs depends on rx_decap_mode, so it needs to be recalculated. The pattern of an 802.3 packet is not the same as that of an 802.11 packet. If rx_decap_mode is ATH10K_HW_TXRX_NATIVE_WIFI, the firmware will receive data packets from the hardware in 802.11 format. Tested with QCA6174 hw3.0 with firmware WLAN.RM.4.4.1-00099-QCARMSWPZ-1, but this will also affect QCA9377. This has always failed, so it's not a regression with new firmware releases. Signed-off-by: Wen Gong Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.h | 4 + drivers/net/wireless/ath/ath10k/wow.c | 138 ++++++++++++++++++++++++++++++++-- 2 files changed, 137 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 2705973e39c6..16a39244a34f 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6835,6 +6835,10 @@ struct wmi_wow_ev_arg { #define WOW_MIN_PATTERN_SIZE 1 #define WOW_MAX_PATTERN_SIZE 148 #define WOW_MAX_PKT_OFFSET 128 +#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \ + sizeof(struct rfc1042_hdr)) +#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \ + offsetof(struct ieee80211_hdr_3addr, addr1)) enum wmi_tdls_state { WMI_TDLS_DISABLE, diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index c4cbccb29b31..a6b179f88d36 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2015-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -76,6 +77,109 @@ static int ath10k_wow_cleanup(struct ath10k *ar) return 0; } +/** + * Convert a 802.3 format to a 802.11 format. + * +------------+-----------+--------+----------------+ + * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... | + * +------------+-----------+--------+----------------+ + * |__ |_______ |____________ |________ + * | | | | + * +--+------------+----+-----------+---------------+-----------+ + * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... 
| + * +--+------------+----+-----------+---------------+-----------+ + */ +static void ath10k_wow_convert_8023_to_80211 + (struct cfg80211_pkt_pattern *new, + const struct cfg80211_pkt_pattern *old) +{ + u8 hdr_8023_pattern[ETH_HLEN] = {}; + u8 hdr_8023_bit_mask[ETH_HLEN] = {}; + u8 hdr_80211_pattern[WOW_HDR_LEN] = {}; + u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {}; + + int total_len = old->pkt_offset + old->pattern_len; + int hdr_80211_end_offset; + + struct ieee80211_hdr_3addr *new_hdr_pattern = + (struct ieee80211_hdr_3addr *)hdr_80211_pattern; + struct ieee80211_hdr_3addr *new_hdr_mask = + (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask; + struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern; + struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask; + int hdr_len = sizeof(*new_hdr_pattern); + + struct rfc1042_hdr *new_rfc_pattern = + (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len); + struct rfc1042_hdr *new_rfc_mask = + (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len); + int rfc_len = sizeof(*new_rfc_pattern); + + memcpy(hdr_8023_pattern + old->pkt_offset, + old->pattern, ETH_HLEN - old->pkt_offset); + memcpy(hdr_8023_bit_mask + old->pkt_offset, + old->mask, ETH_HLEN - old->pkt_offset); + + /* Copy destination address */ + memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN); + memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN); + + /* Copy source address */ + memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN); + memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN); + + /* Copy logic link type */ + memcpy(&new_rfc_pattern->snap_type, + &old_hdr_pattern->h_proto, + sizeof(old_hdr_pattern->h_proto)); + memcpy(&new_rfc_mask->snap_type, + &old_hdr_mask->h_proto, + sizeof(old_hdr_mask->h_proto)); + + /* Caculate new pkt_offset */ + if (old->pkt_offset < ETH_ALEN) + new->pkt_offset = old->pkt_offset + + offsetof(struct ieee80211_hdr_3addr, addr1); + else if (old->pkt_offset < offsetof(struct ethhdr, h_proto)) + new->pkt_offset = old->pkt_offset + + offsetof(struct ieee80211_hdr_3addr, addr3) - + offsetof(struct ethhdr, h_source); + else + new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN; + + /* Caculate new hdr end offset */ + if (total_len > ETH_HLEN) + hdr_80211_end_offset = hdr_len + rfc_len; + else if (total_len > offsetof(struct ethhdr, h_proto)) + hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN; + else if (total_len > ETH_ALEN) + hdr_80211_end_offset = total_len - ETH_ALEN + + offsetof(struct ieee80211_hdr_3addr, addr3); + else + hdr_80211_end_offset = total_len + + offsetof(struct ieee80211_hdr_3addr, addr1); + + new->pattern_len = hdr_80211_end_offset - new->pkt_offset; + + memcpy((u8 *)new->pattern, + hdr_80211_pattern + new->pkt_offset, + new->pattern_len); + memcpy((u8 *)new->mask, + hdr_80211_bit_mask + new->pkt_offset, + new->pattern_len); + + if (total_len > ETH_HLEN) { + /* Copy frame body */ + memcpy((u8 *)new->pattern + new->pattern_len, + (void *)old->pattern + ETH_HLEN - old->pkt_offset, + total_len - ETH_HLEN); + memcpy((u8 *)new->mask + new->pattern_len, + (void *)old->mask + ETH_HLEN - old->pkt_offset, + total_len - ETH_HLEN); + + new->pattern_len += total_len - ETH_HLEN; + } +} + static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, struct cfg80211_wowlan *wowlan) { @@ -116,22 +220,40 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, for (i = 0; i < wowlan->n_patterns; i++) { u8 bitmask[WOW_MAX_PATTERN_SIZE] = {}; + u8 
ath_pattern[WOW_MAX_PATTERN_SIZE] = {}; + u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {}; + struct cfg80211_pkt_pattern new_pattern = {}; + struct cfg80211_pkt_pattern old_pattern = patterns[i]; int j; + new_pattern.pattern = ath_pattern; + new_pattern.mask = ath_bitmask; if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE) continue; - /* convert bytemask to bitmask */ for (j = 0; j < patterns[i].pattern_len; j++) if (patterns[i].mask[j / 8] & BIT(j % 8)) bitmask[j] = 0xff; + old_pattern.mask = bitmask; + new_pattern = old_pattern; + + if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { + if (patterns[i].pkt_offset < ETH_HLEN) + ath10k_wow_convert_8023_to_80211(&new_pattern, + &old_pattern); + else + new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN; + } + + if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE)) + return -EINVAL; ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id, pattern_id, - patterns[i].pattern, - bitmask, - patterns[i].pattern_len, - patterns[i].pkt_offset); + new_pattern.pattern, + new_pattern.mask, + new_pattern.pattern_len, + new_pattern.pkt_offset); if (ret) { ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n", pattern_id, @@ -345,6 +467,12 @@ int ath10k_wow_init(struct ath10k *ar) return -EINVAL; ar->wow.wowlan_support = ath10k_wowlan_support; + + if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { + ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE; + ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; + } + ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; -- cgit v1.2.3 From 0fc8bb50bbfc82d113218c803e1965838dcdbc89 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 19 Apr 2018 19:40:11 +0300 Subject: wcn36xx: pass correct BSS index when deleting BSS keys The firmware message to delete BSS keys expects a BSS index to be passed. This field is currently hard-coded to 0. Fix this by passing in the index we received from the firmware when the BSS was configured. The encryption type in that message also needs to be set to what was used when the key was set, so the assignment of vif_priv->encrypt_type is now done after the firmware command was sent. This reportedly fixes the following error in AP mode: wcn36xx: ERROR hal_remove_bsskey response failed err=6 Also, AFAIU, when a BSS is deleted, the firmware apparently drops all the keys associated with it. Trying to remove the key explicitly afterwards will hence lead to the following message: wcn36xx: ERROR hal_remove_bsskey response failed err=16 This is now suppressed with an extra check for the BSS index validity. 
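For illustration only (not part of this patch): the ordering matters because the firmware identifies the key by the encryption type that was in force when the key was installed, so the cached type may only be cleared after the removal request has been built. A minimal sketch of the DISABLE_KEY path, using the names from the hunk below:

/* Remove the group key only while the BSS slot is still valid, and clear
 * the cached encryption type afterwards so the removal request carries the
 * same enc_type that was used when the key was set.
 */
if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX)
	wcn36xx_smd_remove_bsskey(wcn, vif_priv->encrypt_type,
				  vif_priv->bss_index, key_conf->keyidx);

vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;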
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 10 +++++++--- drivers/net/wireless/ath/wcn36xx/smd.c | 6 ++++-- drivers/net/wireless/ath/wcn36xx/smd.h | 2 ++ 3 files changed, 13 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 32bbd6e2fd09..749aef3e2b85 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -549,6 +549,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } else { wcn36xx_smd_set_bsskey(wcn, vif_priv->encrypt_type, + vif_priv->bss_index, key_conf->keyidx, key_conf->keylen, key); @@ -566,10 +567,13 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; case DISABLE_KEY: if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) { + if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX) + wcn36xx_smd_remove_bsskey(wcn, + vif_priv->encrypt_type, + vif_priv->bss_index, + key_conf->keyidx); + vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE; - wcn36xx_smd_remove_bsskey(wcn, - vif_priv->encrypt_type, - key_conf->keyidx); } else { sta_priv->is_data_encrypted = false; /* do not remove key if disassociated */ diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 9827a1e1124b..297a785d0785 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1636,6 +1636,7 @@ out: int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn, enum ani_ed_type enc_type, + u8 bssidx, u8 keyidx, u8 keylen, u8 *key) @@ -1645,7 +1646,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn, mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ); - msg_body.bss_idx = 0; + msg_body.bss_idx = bssidx; msg_body.enc_type = enc_type; msg_body.num_keys = 1; msg_body.keys[0].id = keyidx; @@ -1706,6 +1707,7 @@ out: int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn, enum ani_ed_type enc_type, + u8 bssidx, u8 keyidx) { struct wcn36xx_hal_remove_bss_key_req_msg msg_body; @@ -1713,7 +1715,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn, mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ); - msg_body.bss_idx = 0; + msg_body.bss_idx = bssidx; msg_body.enc_type = enc_type; msg_body.key_id = keyidx; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 8076edf40ac8..61bb8d43138c 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -97,6 +97,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn, enum ani_ed_type enc_type, + u8 bssidx, u8 keyidx, u8 keylen, u8 *key); @@ -106,6 +107,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn, enum ani_ed_type enc_type, + u8 bssidx, u8 keyidx); int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif); int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif); -- cgit v1.2.3 From 20529b33aa12224e4399b4486e5ab617cf8f3e5e Mon Sep 17 00:00:00 2001 From: Rakesh Pillai Date: Tue, 17 Apr 2018 14:54:26 +0530 Subject: ath10k: enable hw checksum for wcn3990 By default ath10k driver enables the support for HW_CHECKSUM (NETIF_F_HW_CSUM). 
Since the TCP/UDP checksum calculation is not enabled in the wcn3990 firmware, the checksum is incorrect in the TCP/UDP packets and all packets are dropped. But do note that wcn3990 support in ath10k is still incomplete, so this isn't a critical fix (yet). Enable hw checksum calculations in wcn3990 hardware by setting the proper flags in the msdu descriptor tso flags. Signed-off-by: Rakesh Pillai Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 13 +++++++++++++ drivers/net/wireless/ath/ath10k/htt_tx.c | 7 +++++-- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 4e5fe539eb77..5d3ff80f3a1f 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -128,6 +128,19 @@ struct htt_msdu_ext_desc_64 { | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE) +#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 BIT(16) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20) +#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64 BIT(21) + +#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64) + enum htt_data_tx_desc_flags0 { HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0, HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1, diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index a086958c39b6..5d8b97a0ccaa 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1475,8 +1475,11 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; - if (ar->hw_params.continuous_frag_desc) - ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; + if (ar->hw_params.continuous_frag_desc) { + memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag)); + ext_desc->tso_flag[3] |= + __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64); + } } /* Prevent firmware from sending up tx inspection requests. There's -- cgit v1.2.3 From b2e40d7ab8e2d36b455113df96dcbab0407e35c0 Mon Sep 17 00:00:00 2001 From: Rakesh Pillai Date: Tue, 17 Apr 2018 17:36:58 +0530 Subject: ath10k: add hw params for shadow register support wcn3990 supports shadow register for ce write. Add a hw param for shadow register support.
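For illustration only (not part of this patch): the new flag is just a per-target capability gate. A hedged sketch of how a copy engine write-index update is expected to branch on it, mirroring the follow-up copy engine patch further below:

/* Prefer the shadow register block when the target supports it (WCN3990);
 * otherwise fall back to the regular CE source ring write-index register.
 */
if (ar->hw_params.shadow_reg_support)
	ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, write_index);
else
	ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);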
Signed-off-by: Rakesh Pillai Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 14 ++++++++++++++ drivers/net/wireless/ath/ath10k/hw.h | 4 ++++ 2 files changed, 18 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 64674d8ce457..a8bb21173d2c 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -90,6 +90,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, }, { .id = QCA988X_HW_2_0_VERSION, @@ -120,6 +121,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA9887_HW_1_0_VERSION, @@ -150,6 +152,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -179,6 +182,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -208,6 +212,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA6174_HW_3_0_VERSION, @@ -237,6 +242,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA6174_HW_3_2_VERSION, @@ -269,6 +275,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, @@ -304,6 +311,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA9984_HW_1_0_DEV_VERSION, @@ -344,6 +352,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA9888_HW_2_0_DEV_VERSION, @@ -383,6 +392,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA9377_HW_1_0_DEV_VERSION, @@ -412,6 +422,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ -443,6 +454,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -479,6 +491,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + 
.shadow_reg_support = false, }, { .id = WCN3990_HW_1_0_DEV_VERSION, @@ -500,6 +513,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = true, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, .per_ce_irq = true, + .shadow_reg_support = true, }, }; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index b025a1bf2fde..dcefde76956c 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -571,6 +572,9 @@ struct ath10k_hw_params { /* target supporting per ce IRQ */ bool per_ce_irq; + + /* target supporting shadow register for ce write */ + bool shadow_reg_support; }; struct htt_rx_desc; -- cgit v1.2.3 From b7ba83f7c414e583fdf82a1b1b95d2376cdb4b45 Mon Sep 17 00:00:00 2001 From: Rakesh Pillai Date: Tue, 17 Apr 2018 17:36:59 +0530 Subject: ath10k: add support for shadow register for WNC3990 WCN3990 needs shadow register write operation support for copy engine for regular operation in powersave mode. Add support for copy engine shadow register write in datapath tx for WCN3990 Signed-off-by: Rakesh Pillai Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 143 ++++++++++++++++++++++++++++++++++- drivers/net/wireless/ath/ath10k/ce.h | 4 + 2 files changed, 145 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 3a15923bdd26..020405b6d408 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -58,6 +59,74 @@ * the buffer is sent/received. 
*/ +static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state) +{ + u32 ce_id = ce_state->id; + u32 addr = 0; + + switch (ce_id) { + case 0: + addr = 0x00032000; + break; + case 3: + addr = 0x0003200C; + break; + case 4: + addr = 0x00032010; + break; + case 5: + addr = 0x00032014; + break; + case 7: + addr = 0x0003201C; + break; + default: + ath10k_warn(ar, "invalid CE id: %d", ce_id); + break; + } + return addr; +} + +static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state) +{ + u32 ce_id = ce_state->id; + u32 addr = 0; + + switch (ce_id) { + case 1: + addr = 0x00032034; + break; + case 2: + addr = 0x00032038; + break; + case 5: + addr = 0x00032044; + break; + case 7: + addr = 0x0003204C; + break; + case 8: + addr = 0x00032050; + break; + case 9: + addr = 0x00032054; + break; + case 10: + addr = 0x00032058; + break; + case 11: + addr = 0x0003205C; + break; + default: + ath10k_warn(ar, "invalid CE id: %d", ce_id); + break; + } + + return addr; +} + static inline unsigned int ath10k_set_ring_byte(unsigned int offset, struct ath10k_hw_ce_regs_addr_map *addr_map) @@ -123,6 +192,22 @@ static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, ar->hw_ce_regs->current_srri_addr); } +static inline void +ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state, + unsigned int value) +{ + ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value); +} + +static inline void +ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state, + unsigned int value) +{ + ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value); +} + static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int addr) @@ -376,8 +461,14 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, write_index = CE_RING_IDX_INCR(nentries_mask, write_index); /* WORKAROUND */ - if (!(flags & CE_SEND_FLAG_GATHER)) - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); + if (!(flags & CE_SEND_FLAG_GATHER)) { + if (ar->hw_params.shadow_reg_support) + ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, + write_index); + else + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, + write_index); + } src_ring->write_index = write_index; exit: @@ -1251,6 +1342,22 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, return 0; } +static int ath10k_ce_alloc_shadow_base(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 nentries) +{ + src_ring->shadow_base_unaligned = kcalloc(nentries, + sizeof(struct ce_desc), + GFP_KERNEL); + if (!src_ring->shadow_base_unaligned) + return -ENOMEM; + + src_ring->shadow_base = (struct ce_desc *) + PTR_ALIGN(src_ring->shadow_base_unaligned, + CE_DESC_RING_ALIGN); + return 0; +} + static struct ath10k_ce_ring * ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) @@ -1258,6 +1365,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, struct ath10k_ce_ring *src_ring; u32 nentries = attr->src_nentries; dma_addr_t base_addr; + int ret; nentries = roundup_pow_of_two(nentries); @@ -1294,6 +1402,19 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, ALIGN(src_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); + if (ar->hw_params.shadow_reg_support) { + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); + if (ret) { + dma_free_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + 
CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); + kfree(src_ring); + return ERR_PTR(ret); + } + } + return src_ring; } @@ -1304,6 +1425,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, struct ath10k_ce_ring *src_ring; u32 nentries = attr->src_nentries; dma_addr_t base_addr; + int ret; nentries = roundup_pow_of_two(nentries); @@ -1339,6 +1461,19 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, ALIGN(src_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); + if (ar->hw_params.shadow_reg_support) { + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); + if (ret) { + dma_free_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); + kfree(src_ring); + return ERR_PTR(ret); + } + } + return src_ring; } @@ -1505,6 +1640,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; if (ce_state->src_ring) { + if (ar->hw_params.shadow_reg_support) + kfree(ce_state->src_ring->shadow_base_unaligned); dma_free_coherent(ar->dev, (ce_state->src_ring->nentries * sizeof(struct ce_desc) + @@ -1534,6 +1671,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id) struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; if (ce_state->src_ring) { + if (ar->hw_params.shadow_reg_support) + kfree(ce_state->src_ring->shadow_base_unaligned); dma_free_coherent(ar->dev, (ce_state->src_ring->nentries * sizeof(struct ce_desc_64) + diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index d8f9da334529..9aea89133209 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -113,6 +114,9 @@ struct ath10k_ce_ring { /* CE address space */ u32 base_addr_ce_space; + char *shadow_base_unaligned; + struct ce_desc *shadow_base; + /* keep last */ void *per_transfer_context[0]; }; -- cgit v1.2.3 From 4945af5b264fbdbdb5a9021b8a6a179d0c7a33b2 Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 17 Apr 2018 17:37:00 +0530 Subject: ath10k: enable SRRI/DRRI support on ddr for WCN3990 SRRI/DRRI are not mapped in the HW Shadow block and can lead to un-clocked access if common subsystem in the target is powered down due to idle mode. To mitigate this problem SRRI/DRRI can be read from DDR instead of doing an actual hardware read. Host allocates non cached memory on ddr and configures the physical address of this memory to the CE hardware. The hardware updates the RRI on this particular location. Read SRRI/DRRI from DDR location instead of direct target read. Enable retention restore on ddr using hw params to enable in specific targets. 
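For illustration only (not part of this patch): with rri_on_ddr enabled, reading a ring index becomes a plain load from the shared DDR word for that copy engine, with SRRI in the low bits and DRRI shifted above it, as the helpers in the hunk below do. A minimal sketch under those assumptions (the example_* name is hypothetical):

static u32 example_read_rri_from_ddr(struct ath10k_ce *ce, u32 ce_id,
				     bool dest_ring)
{
	/* One 32-bit word per CE: SRRI in the low bits, DRRI shifted up. */
	u32 word = ce->vaddr_rri[ce_id];

	if (dest_ring)
		return (word >> CE_DDR_DRRI_SHIFT) & CE_DDR_RRI_MASK;

	return word & CE_DDR_RRI_MASK;
}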
Signed-off-by: Govind Singh Signed-off-by: Rakesh Pillai Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 102 +++++++++++++++++++++++++++++++-- drivers/net/wireless/ath/ath10k/ce.h | 10 ++++ drivers/net/wireless/ath/ath10k/core.c | 14 +++++ drivers/net/wireless/ath/ath10k/hw.c | 9 ++- drivers/net/wireless/ath/ath10k/hw.h | 13 ++++- drivers/net/wireless/ath/ath10k/snoc.c | 3 + 6 files changed, 142 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 020405b6d408..3b96a43fbda4 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -185,11 +185,30 @@ static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, ar->hw_ce_regs->sr_wr_index_addr); } +static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar, + u32 ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK; +} + static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_ce_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_srri_addr); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 index; + + if (ar->hw_params.rri_on_ddr && + (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id); + else + index = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_srri_addr); + + return index; } static inline void @@ -266,11 +285,31 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); } +static inline + u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) & + CE_DDR_RRI_MASK; +} + static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_ce_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_drri_addr); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 index; + + if (ar->hw_params.rri_on_ddr && + (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id); + else + index = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_drri_addr); + + return index; } static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, @@ -486,7 +525,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, struct ath10k_ce_ring *src_ring = ce_state->src_ring; struct ce_desc_64 *desc, sdesc; unsigned int nentries_mask = src_ring->nentries_mask; - unsigned int sw_index = src_ring->sw_index; + unsigned int sw_index; unsigned int write_index = src_ring->write_index; u32 ctrl_addr = ce_state->ctrl_addr; __le32 *addr; @@ -500,6 +539,11 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", __func__, nbytes, ce_state->src_sz_max); + if (ar->hw_params.rri_on_ddr) + sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id); + else + sw_index = src_ring->sw_index; + if (unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) <= 0)) { ret = -ENOSR; @@ -1016,7 +1060,10 @@ int ath10k_ce_completed_send_next_nolock(struct 
ath10k_ce_pipe *ce_state, src_ring->hw_index = read_index; } - read_index = src_ring->hw_index; + if (ar->hw_params.rri_on_ddr) + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + else + read_index = src_ring->hw_index; if (read_index == sw_index) return -EIO; @@ -1841,3 +1888,46 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, return 0; } EXPORT_SYMBOL(ath10k_ce_alloc_pipe); + +void ath10k_ce_alloc_rri(struct ath10k *ar) +{ + int i; + u32 value; + u32 ctrl1_regs; + u32 ce_base_addr; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + ce->vaddr_rri = dma_alloc_coherent(ar->dev, + (CE_COUNT * sizeof(u32)), + &ce->paddr_rri, GFP_KERNEL); + + if (!ce->vaddr_rri) + return; + + ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low, + lower_32_bits(ce->paddr_rri)); + ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high, + (upper_32_bits(ce->paddr_rri) & + CE_DESC_FLAGS_GET_MASK)); + + for (i = 0; i < CE_COUNT; i++) { + ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr; + ce_base_addr = ath10k_ce_base_address(ar, i); + value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs); + value |= ar->hw_ce_regs->upd->mask; + ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value); + } + + memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32)); +} +EXPORT_SYMBOL(ath10k_ce_alloc_rri); + +void ath10k_ce_free_rri(struct ath10k *ar) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)), + ce->vaddr_rri, + ce->paddr_rri); +} +EXPORT_SYMBOL(ath10k_ce_free_rri); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 9aea89133209..dbeffaef6024 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -49,6 +49,9 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask #define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb +#define CE_DDR_RRI_MASK GENMASK(15, 0) +#define CE_DDR_DRRI_SHIFT 16 + struct ce_desc { __le32 addr; __le16 nbytes; @@ -157,6 +160,8 @@ struct ath10k_ce { spinlock_t ce_lock; const struct ath10k_bus_ops *bus_ops; struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + u32 *vaddr_rri; + dma_addr_t paddr_rri; }; /*==================Send====================*/ @@ -265,6 +270,8 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar); void ath10k_ce_enable_interrupts(struct ath10k *ar); void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data); +void ath10k_ce_alloc_rri(struct ath10k *ar); +void ath10k_ce_free_rri(struct ath10k *ar); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? 
*/ @@ -331,6 +338,9 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; } +#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \ + - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS)) + #define CE_SRC_RING_TO_DESC(baddr, idx) \ (&(((struct ce_desc *)baddr)[idx])) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index a8bb21173d2c..4cf54a7ef09a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -91,6 +91,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA988X_HW_2_0_VERSION, @@ -122,6 +123,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9887_HW_1_0_VERSION, @@ -153,6 +155,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -183,6 +186,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -213,6 +217,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA6174_HW_3_0_VERSION, @@ -243,6 +248,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA6174_HW_3_2_VERSION, @@ -276,6 +282,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, @@ -312,6 +319,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9984_HW_1_0_DEV_VERSION, @@ -353,6 +361,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9888_HW_2_0_DEV_VERSION, @@ -393,6 +402,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9377_HW_1_0_DEV_VERSION, @@ -423,6 +433,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ -455,6 +466,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -492,6 +504,7 @@ static const 
struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = WCN3990_HW_1_0_DEV_VERSION, @@ -514,6 +527,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, .per_ce_irq = true, .shadow_reg_support = true, + .rri_on_ddr = true, }, }; diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 497ac33e0fbf..677535b3d207 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -310,6 +310,12 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { .wm_high = &wcn3990_dst_wm_high, }; +static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = { + .shift = 19, + .mask = 0x00080000, + .enable = 0x00000000, +}; + const struct ath10k_hw_ce_regs wcn3990_ce_regs = { .sr_base_addr = 0x00000000, .sr_size_addr = 0x00000008, @@ -320,8 +326,6 @@ const struct ath10k_hw_ce_regs wcn3990_ce_regs = { .dst_wr_index_addr = 0x00000040, .current_srri_addr = 0x00000044, .current_drri_addr = 0x00000048, - .ddr_addr_for_rri_low = 0x00000004, - .ddr_addr_for_rri_high = 0x00000008, .ce_rri_low = 0x0024C004, .ce_rri_high = 0x0024C008, .host_ie_addr = 0x0000002c, @@ -331,6 +335,7 @@ const struct ath10k_hw_ce_regs wcn3990_ce_regs = { .misc_regs = &wcn3990_misc_reg, .wm_srcr = &wcn3990_wm_src_ring, .wm_dstr = &wcn3990_wm_dst_ring, + .upd = &wcn3990_ctrl1_upd, }; const struct ath10k_hw_values wcn3990_values = { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index dcefde76956c..b8bdabe73073 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -336,6 +336,12 @@ struct ath10k_hw_ce_dst_src_wm_regs { struct ath10k_hw_ce_regs_addr_map *wm_low; struct ath10k_hw_ce_regs_addr_map *wm_high; }; +struct ath10k_hw_ce_ctrl1_upd { + u32 shift; + u32 mask; + u32 enable; +}; + struct ath10k_hw_ce_regs { u32 sr_base_addr; u32 sr_size_addr; @@ -358,7 +364,9 @@ struct ath10k_hw_ce_regs { struct ath10k_hw_ce_cmd_halt *cmd_halt; struct ath10k_hw_ce_host_ie *host_ie; struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr; - struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; }; + struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; + struct ath10k_hw_ce_ctrl1_upd *upd; +}; struct ath10k_hw_values { u32 rtc_state_val_on; @@ -575,6 +583,9 @@ struct ath10k_hw_params { /* target supporting shadow register for ce write */ bool shadow_reg_support; + + /* target supporting retention restore on ddr */ + bool rri_on_ddr; }; struct htt_rx_desc; diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 2e490ff124f1..47a4d2a5bd4c 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -767,6 +767,7 @@ static void ath10k_snoc_hif_power_down(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); ath10k_snoc_wlan_disable(ar); + ath10k_ce_free_rri(ar); } static int ath10k_snoc_hif_power_up(struct ath10k *ar) @@ -782,6 +783,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar) return ret; } + ath10k_ce_alloc_rri(ar); + ret = ath10k_snoc_init_pipes(ar); if (ret) { ath10k_err(ar, "failed to initialize CE: %d\n", ret); -- cgit v1.2.3 From d5cded16fdc02a31f9f73d899329896218c594aa Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 17 Apr 2018 17:37:01 +0530 Subject: ath10k: enable sta idle power save Enable sta power 
save in fw for the targets that supports idle power save. The idle ps enable command will be ignored by the firmware which does not support this feature. Signed-off-by: Govind Singh Signed-off-by: Rakesh Pillai Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index c71cf5b81385..3d7119ad7c7a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4679,6 +4679,13 @@ static int ath10k_start(struct ieee80211_hw *hw) } } + param = ar->wmi.pdev_param->idle_ps_config; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret); + goto err_core_stop; + } + __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); /* -- cgit v1.2.3 From 14ca3c98aa12842615a36812f29dc39b60811c93 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 17 Apr 2018 15:23:33 +0200 Subject: wcn36xx: abort scan request when 'dequeued' indicator is sent When the firmware sends a WCN36XX_HAL_SCAN_IND_DEQUEUED indication, the request is apparently no longer valid. Attempts to stop the hardware scan request subsequently will lead to the following error message, and the hardware is no longer able to communicate with any AP: [ 57.917186] wcn36xx: ERROR hal_stop_scan_offload response failed err=5 Interpreting this indicator message as scan abortion fixes this. While at it, add a newline to a debug print. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 297a785d0785..b7964f79d837 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -2140,10 +2140,11 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len) return -EIO; } - wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)", rsp->type); + wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)\n", rsp->type); switch (rsp->type) { case WCN36XX_HAL_SCAN_IND_FAILED: + case WCN36XX_HAL_SCAN_IND_DEQUEUED: scan_info.aborted = true; /* fall through */ case WCN36XX_HAL_SCAN_IND_COMPLETED: @@ -2156,7 +2157,6 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len) break; case WCN36XX_HAL_SCAN_IND_STARTED: case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL: - case WCN36XX_HAL_SCAN_IND_DEQUEUED: case WCN36XX_HAL_SCAN_IND_PREEMPTED: case WCN36XX_HAL_SCAN_IND_RESTARTED: break; -- cgit v1.2.3 From 80c764d32122eceb26754d9ffa9718c124c8072e Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 17 Apr 2018 15:23:34 +0200 Subject: wcn36xx: cancel pending scan request when interface goes down When the network interface goes down while a scan request is still pending that can't be stopped due to firmware hickups, wcn->scan_req remains set, even though the hardware is deinitialized. This results in -EBUSY for all scan requests after the interface was brought up again. Fix this by explicitly completing pending scan requests in wcn36xx_stop(). 
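The fix follows the usual cfg80211 teardown pattern; a hedged summary of that pattern is below (the actual change open-codes it in wcn36xx_stop(), and wcn36xx_abort_pending_scan() is only a name invented here for illustration):

/* Illustrative only: report any still-pending scan as aborted so
 * cfg80211/mac80211 stop treating the device as busy.  Assumes the
 * scan work has already been cancelled and scan_lock serialises
 * access to scan_req.
 */
static void wcn36xx_abort_pending_scan(struct wcn36xx *wcn)
{
	struct cfg80211_scan_info scan_info = {
		.aborted = true,
	};

	mutex_lock(&wcn->scan_lock);
	if (wcn->scan_req)
		ieee80211_scan_completed(wcn->hw, &scan_info);
	wcn->scan_req = NULL;
	mutex_unlock(&wcn->scan_lock);
}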
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 749aef3e2b85..08b6939d3f57 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -353,6 +353,19 @@ static void wcn36xx_stop(struct ieee80211_hw *hw) wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n"); + cancel_work_sync(&wcn->scan_work); + + mutex_lock(&wcn->scan_lock); + if (wcn->scan_req) { + struct cfg80211_scan_info scan_info = { + .aborted = true, + }; + + ieee80211_scan_completed(wcn->hw, &scan_info); + } + wcn->scan_req = NULL; + mutex_unlock(&wcn->scan_lock); + wcn36xx_debugfs_exit(wcn); wcn36xx_smd_stop(wcn); wcn36xx_dxe_deinit(wcn); -- cgit v1.2.3 From 89722f5767ac71b66e3077b6a158495eb316a43c Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 17 Apr 2018 15:23:35 +0200 Subject: wcn36xx: handle scan cancellation when firmware support is missing For firmwares that don't have the SCAN_OFFLOAD feature bit set, do not call into wcn36xx_smd_stop_hw_scan(). Instead, stop the asynchronous work and call into ieee80211_scan_completed() immediately. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 08b6939d3f57..e3b91b3b38ef 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -687,10 +687,18 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw, wcn->scan_aborted = true; mutex_unlock(&wcn->scan_lock); - /* ieee80211_scan_completed will be called on FW scan indication */ - wcn36xx_smd_stop_hw_scan(wcn); + if (get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) { + /* ieee80211_scan_completed will be called on FW scan + * indication */ + wcn36xx_smd_stop_hw_scan(wcn); + } else { + struct cfg80211_scan_info scan_info = { + .aborted = true, + }; - cancel_work_sync(&wcn->scan_work); + cancel_work_sync(&wcn->scan_work); + ieee80211_scan_completed(wcn->hw, &scan_info); + } } static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, -- cgit v1.2.3 From 5a425c8b78e5a3d4850035c737f4f453f528320a Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 17 Apr 2018 15:23:36 +0200 Subject: wcn36xx: send bss_type in scan requests Pass the bss_type of the currently configured BSS in the message for the scan request. Therefore, that setting needs to be kept in struct wcn36xx_vif. This seems to be only interesting when scanning for a specific SSID and doesn't matter for regular wildcard scans. 
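In code terms the change boils down to two assignments, shown here only as a condensed view of the hunks below:

/* In wcn36xx_smd_config_bss(): remember the BSS type that was last
 * configured for this vif.
 */
vif_priv->bss_type = bss->bss_type;

/* In wcn36xx_smd_start_hw_scan(): hand that cached type to firmware
 * together with the scan request.
 */
msg_body.bss_type = vif_priv->bss_type;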
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 5 ++++- drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index b7964f79d837..9c8ce5e0454f 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -620,6 +620,7 @@ out: int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct cfg80211_scan_request *req) { + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); struct wcn36xx_hal_start_scan_offload_req_msg msg_body; int ret, i; @@ -631,6 +632,7 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, msg_body.max_ch_time = 100; msg_body.scan_hidden = 1; memcpy(msg_body.mac, vif->addr, ETH_ALEN); + msg_body.bss_type = vif_priv->bss_type; msg_body.p2p_search = vif->p2p; msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids)); @@ -1399,9 +1401,10 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, bss->spectrum_mgt_enable = 0; bss->tx_mgmt_power = 0; bss->max_tx_power = WCN36XX_MAX_POWER(wcn); - bss->action = update; + vif_priv->bss_type = bss->bss_type; + wcn36xx_dbg(WCN36XX_DBG_HAL, "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n", bss->bssid, bss->self_mac_addr, bss->bss_type, diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 5854adf43f3a..22e05cb4eb98 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -123,6 +123,7 @@ struct wcn36xx_vif { bool is_joining; bool sta_assoc; struct wcn36xx_hal_mac_ssid ssid; + enum wcn36xx_hal_bss_type bss_type; /* Power management */ enum wcn36xx_power_state pw_state; -- cgit v1.2.3 From 4836ec42b07433fc8bdaf36a18f74f7a89659bb4 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 17 Apr 2018 15:23:37 +0200 Subject: wcn36xx: pass information elements in scan requests When the wifi driver core passes IE elements in the scan request, append them to the firmware message. The driver currently tells the core that it is capable of attaching up to WCN36XX_MAX_SCAN_IE_LEN octets, but doesn't actually pass them to the the hardware. Note that this patch doesn't fix a bug that was observed. The change is merely done for the sake of completeness as the hardware supports appending IEs in scans. Tests show that network scans work fine with this patch applied. Some defines were moved around to avoid cyclic include dependencies. 
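The length accounting is the subtle part: the request structure now carries a fixed-size ie[] buffer, so the message header length has to be trimmed back to the bytes actually used. A condensed view of the logic added below (sketch only, not a replacement for the hunks themselves):

if (req->ie_len > WCN36XX_MAX_SCAN_IE_LEN)
	return -EINVAL;

/* msg_body was sized with the full WCN36XX_MAX_SCAN_IE_LEN buffer;
 * strip that reservation and add back only the IE bytes actually
 * copied, so the firmware sees a correctly sized message.
 */
msg_body.header.len -= WCN36XX_MAX_SCAN_IE_LEN;
if (req->ie_len > 0) {
	msg_body.ie_len = req->ie_len;
	msg_body.header.len += req->ie_len;
	memcpy(msg_body.ie, req->ie, req->ie_len);
}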
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/hal.h | 8 +++++++- drivers/net/wireless/ath/wcn36xx/smd.c | 11 +++++++++++ drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 6 ------ 3 files changed, 18 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 182963522941..2aed6c233508 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -88,6 +88,12 @@ /* version string max length (including NULL) */ #define WCN36XX_HAL_VERSION_LENGTH 64 +/* How many frames until we start a-mpdu TX session */ +#define WCN36XX_AMPDU_START_THRESH 20 + +#define WCN36XX_MAX_SCAN_SSIDS 9 +#define WCN36XX_MAX_SCAN_IE_LEN 500 + /* message types for messages exchanged between WDI and HAL */ enum wcn36xx_hal_host_msg_type { /* Init/De-Init */ @@ -1170,7 +1176,7 @@ struct wcn36xx_hal_start_scan_offload_req_msg { /* IE field */ u16 ie_len; - u8 ie[0]; + u8 ie[WCN36XX_MAX_SCAN_IE_LEN]; } __packed; struct wcn36xx_hal_start_scan_offload_rsp_msg { diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 9c8ce5e0454f..ea74f2b92df5 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -624,6 +624,9 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct wcn36xx_hal_start_scan_offload_req_msg msg_body; int ret, i; + if (req->ie_len > WCN36XX_MAX_SCAN_IE_LEN) + return -EINVAL; + mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ); @@ -648,6 +651,14 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, for (i = 0; i < msg_body.num_channel; i++) msg_body.channels[i] = req->channels[i]->hw_value; + msg_body.header.len -= WCN36XX_MAX_SCAN_IE_LEN; + + if (req->ie_len > 0) { + msg_body.ie_len = req->ie_len; + msg_body.header.len += req->ie_len; + memcpy(msg_body.ie, req->ie, req->ie_len); + } + PREPARE_HAL_BUF(wcn->hal_buf, msg_body); wcn36xx_dbg(WCN36XX_DBG_HAL, diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 22e05cb4eb98..9343989d1169 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -32,12 +32,6 @@ #define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin" #define WCN36XX_AGGR_BUFFER_SIZE 64 -/* How many frames until we start a-mpdu TX session */ -#define WCN36XX_AMPDU_START_THRESH 20 - -#define WCN36XX_MAX_SCAN_SSIDS 9 -#define WCN36XX_MAX_SCAN_IE_LEN 500 - extern unsigned int wcn36xx_dbg_mask; enum wcn36xx_debug_mask { -- cgit v1.2.3 From 026a807c2de37aa826748c2ffa1969fc778406b2 Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Tue, 24 Apr 2018 13:36:01 +0300 Subject: net/dim: Rename *_get_profile() functions to *_get_rx_moderation() Preparation for introducing adaptive TX to net DIM. Signed-off-by: Tal Gilboa Reviewed-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 6 +++--- drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c | 8 ++++---- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 ++++-- include/linux/net_dim.h | 12 ++++++------ 6 files changed, 23 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f9a3c1a76d5d..effc651a2a2f 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -654,7 +654,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev, pkts = priv->rx_max_coalesced_frames; if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) { - moder = net_dim_get_def_profile(priv->dim.dim.mode); + moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode); usecs = moder.usec; pkts = moder.pkts; } @@ -1064,7 +1064,7 @@ static void bcm_sysport_dim_work(struct work_struct *work) struct bcm_sysport_priv *priv = container_of(ndim, struct bcm_sysport_priv, dim); struct net_dim_cq_moder cur_profile = - net_dim_get_profile(dim->mode, dim->profile_ix); + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts); dim->state = NET_DIM_START_MEASURE; @@ -1437,7 +1437,7 @@ static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv) /* If DIM was enabled, re-apply default parameters */ if (dim->use_dim) { - moder = net_dim_get_def_profile(dim->dim.mode); + moder = net_dim_get_def_rx_moderation(dim->dim.mode); usecs = moder.usec; pkts = moder.pkts; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c index 408dd190331e..afa97c8bb081 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c @@ -21,11 +21,11 @@ void bnxt_dim_work(struct work_struct *work) struct bnxt_napi *bnapi = container_of(cpr, struct bnxt_napi, cp_ring); - struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode, - dim->profile_ix); + struct net_dim_cq_moder cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); - cpr->rx_ring_coal.coal_ticks = cur_profile.usec; - cpr->rx_ring_coal.coal_bufs = cur_profile.pkts; + cpr->rx_ring_coal.coal_ticks = cur_moder.usec; + cpr->rx_ring_coal.coal_bufs = cur_moder.pkts; bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi); dim->state = NET_DIM_START_MEASURE; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 0445f2c0c629..20c1681bb1af 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -652,7 +652,7 @@ static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, pkts = ring->rx_max_coalesced_frames; if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { - moder = net_dim_get_def_profile(ring->dim.dim.mode); + moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); usecs = moder.usec; pkts = moder.pkts; } @@ -1925,7 +1925,7 @@ static void bcmgenet_dim_work(struct work_struct *work) struct bcmgenet_rx_ring *ring = container_of(ndim, struct bcmgenet_rx_ring, dim); struct net_dim_cq_moder cur_profile = - net_dim_get_profile(dim->mode, dim->profile_ix); + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); 
dim->state = NET_DIM_START_MEASURE; @@ -2102,7 +2102,7 @@ static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) /* If DIM was enabled, re-apply default parameters */ if (dim->use_dim) { - moder = net_dim_get_def_profile(dim->dim.mode); + moder = net_dim_get_def_rx_moderation(dim->dim.mode); usecs = moder.usec; pkts = moder.pkts; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c index 602851ab5b14..1b286e1985d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -38,11 +38,11 @@ void mlx5e_rx_dim_work(struct work_struct *work) struct net_dim *dim = container_of(work, struct net_dim, work); struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim); - struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode, - dim->profile_ix); + struct net_dim_cq_moder cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq, - cur_profile.usec, cur_profile.pkts); + cur_moder.usec, cur_moder.pkts); dim->state = NET_DIM_START_MEASURE; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f10037419978..a69cc1cc7bfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4119,12 +4119,14 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) switch (cq_period_mode) { case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: params->rx_cq_moderation = - net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE); + net_dim_get_def_rx_moderation( + NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE); break; case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: default: params->rx_cq_moderation = - net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE); + net_dim_get_def_rx_moderation( + NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE); } } diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h index 29ed8fd6379a..7ca3c4deb3a6 100644 --- a/include/linux/net_dim.h +++ b/include/linux/net_dim.h @@ -129,17 +129,17 @@ profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { NET_DIM_CQE_PROFILES, }; -static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode, - int ix) +static inline struct net_dim_cq_moder +net_dim_get_rx_moderation(u8 cq_period_mode, int ix) { - struct net_dim_cq_moder cq_moder; + struct net_dim_cq_moder cq_moder = profile[cq_period_mode][ix]; - cq_moder = profile[cq_period_mode][ix]; cq_moder.cq_period_mode = cq_period_mode; return cq_moder; } -static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode) +static inline struct net_dim_cq_moder +net_dim_get_def_rx_moderation(u8 rx_cq_period_mode) { int default_profile_ix; @@ -148,7 +148,7 @@ static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mo else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */ default_profile_ix = NET_DIM_DEF_PROFILE_EQE; - return net_dim_get_profile(rx_cq_period_mode, default_profile_ix); + return net_dim_get_rx_moderation(rx_cq_period_mode, default_profile_ix); } static inline bool net_dim_on_top(struct net_dim *dim) -- cgit v1.2.3 From cbce4f44479856cd4621e3dce531cfb078357b1f Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Tue, 24 Apr 2018 13:36:03 +0300 Subject: net/mlx5e: Enable adaptive-TX moderation Add support for adaptive TX moderation. 
This greatly reduces TX interrupt rate and increases bandwidth, mostly for TCP bandwidth over ARM architecture (below). There is a slight single stream TCP with very large message sizes degradation (x86). In this case if there's any moderation on transmitted packets the bandwidth would reduce due to hitting TCP output limit. Since this is a synthetic case, this is still worth doing. Performance improvement (ConnectX-4Lx 40GbE, ARM) TCP 64B bandwidth with 1-50 streams increased 6-35%. TCP 64B bandwidth with 100-500 streams increased 20-70%. Performance improvement (ConnectX-5 100GbE, x86) Bandwidth: increased up to 40% (1024B with 10s of streams). Interrupt rate: reduced up to 50% (1024B with 1000s of streams). Performance degradation (ConnectX-5 100GbE, x86) Bandwidth: up to 10% decrease single stream TCP (1MB message size from 51Gb/s to 47Gb/s). Signed-off-by: Tal Gilboa Reviewed-by: Tariq Toukan Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 ++ drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 24 +++++-- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 35 +++++++--- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 81 +++++++++++++--------- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 37 +++++++--- 5 files changed, 125 insertions(+), 56 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 3317a4da87cb..56c748ca2a09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -241,6 +241,7 @@ struct mlx5e_params { bool vlan_strip_disable; bool scatter_fcs_en; bool rx_dim_enabled; + bool tx_dim_enabled; u32 lro_timeout; u32 pflags; struct bpf_prog *xdp_prog; @@ -330,6 +331,7 @@ enum { MLX5E_SQ_STATE_ENABLED, MLX5E_SQ_STATE_RECOVERING, MLX5E_SQ_STATE_IPSEC, + MLX5E_SQ_STATE_AM, }; struct mlx5e_sq_wqe_info { @@ -342,6 +344,7 @@ struct mlx5e_txqsq { /* dirtied @completion */ u16 cc; u32 dma_fifo_cc; + struct net_dim dim; /* Adaptive Moderation */ /* dirtied @xmit */ u16 pc ____cacheline_aligned_in_smp; @@ -1111,4 +1114,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, u16 max_channels, u16 mtu); u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); void mlx5e_rx_dim_work(struct work_struct *work); +void mlx5e_tx_dim_work(struct work_struct *work); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c index 1b286e1985d2..d67adf70a97b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -33,16 +33,30 @@ #include #include "en.h" +static void +mlx5e_complete_dim_work(struct net_dim *dim, struct net_dim_cq_moder moder, + struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq) +{ + mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts); + dim->state = NET_DIM_START_MEASURE; +} + void mlx5e_rx_dim_work(struct work_struct *work) { - struct net_dim *dim = container_of(work, struct net_dim, - work); + struct net_dim *dim = container_of(work, struct net_dim, work); struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim); struct net_dim_cq_moder cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); - mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq, - cur_moder.usec, cur_moder.pkts); + mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq); +} - dim->state = 
NET_DIM_START_MEASURE; +void mlx5e_tx_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, work); + struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim); + struct net_dim_cq_moder cur_moder = + net_dim_get_tx_moderation(dim->mode, dim->profile_ix); + + mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 37fd0245b6c1..2b786c4d3dab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -389,14 +389,20 @@ static int mlx5e_set_channels(struct net_device *dev, int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) { + struct net_dim_cq_moder *rx_moder, *tx_moder; + if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) return -EOPNOTSUPP; - coal->rx_coalesce_usecs = priv->channels.params.rx_cq_moderation.usec; - coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts; - coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec; - coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts; - coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; + rx_moder = &priv->channels.params.rx_cq_moderation; + coal->rx_coalesce_usecs = rx_moder->usec; + coal->rx_max_coalesced_frames = rx_moder->pkts; + coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; + + tx_moder = &priv->channels.params.tx_cq_moderation; + coal->tx_coalesce_usecs = tx_moder->usec; + coal->tx_max_coalesced_frames = tx_moder->pkts; + coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled; return 0; } @@ -438,6 +444,7 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) { + struct net_dim_cq_moder *rx_moder, *tx_moder; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; int err = 0; @@ -463,11 +470,15 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, mutex_lock(&priv->state_lock); new_channels.params = priv->channels.params; - new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs; - new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; - new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; - new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; - new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + rx_moder = &new_channels.params.rx_cq_moderation; + rx_moder->usec = coal->rx_coalesce_usecs; + rx_moder->pkts = coal->rx_max_coalesced_frames; + new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + + tx_moder = &new_channels.params.tx_cq_moderation; + tx_moder->usec = coal->tx_coalesce_usecs; + tx_moder->pkts = coal->tx_max_coalesced_frames; + new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; @@ -475,7 +486,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } /* we are opened */ - reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; + reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) || + (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled); + if (!reset) { 
mlx5e_set_priv_channels_coalesce(priv, coal); priv->channels.params = new_channels.params; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a69cc1cc7bfb..f1fe490ed794 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1025,6 +1025,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, if (err) goto err_sq_wq_destroy; + INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work); + sq->dim.mode = params->tx_cq_moderation.cq_period_mode; + sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; return 0; @@ -1188,6 +1191,9 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c, if (tx_rate) mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate); + if (params->tx_dim_enabled) + sq->state |= BIT(MLX5E_SQ_STATE_AM); + return 0; err_free_txqsq: @@ -4084,18 +4090,48 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev) link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; } -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) { - params->tx_cq_moderation.cq_period_mode = cq_period_mode; + struct net_dim_cq_moder moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; + + return moder; +} - params->tx_cq_moderation.pkts = - MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; - params->tx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; +static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) +{ + struct net_dim_cq_moder moder; + moder.cq_period_mode = cq_period_mode; + moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - params->tx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; + moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; + + return moder; +} + +static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) +{ + return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? 
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE : + NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; +} + +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + if (params->tx_dim_enabled) { + u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); + + params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode); + } else { + params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); + } MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, params->tx_cq_moderation.cq_period_mode == @@ -4104,30 +4140,12 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) { - params->rx_cq_moderation.cq_period_mode = cq_period_mode; - - params->rx_cq_moderation.pkts = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - params->rx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - params->rx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; - if (params->rx_dim_enabled) { - switch (cq_period_mode) { - case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: - params->rx_cq_moderation = - net_dim_get_def_rx_moderation( - NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE); - break; - case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: - default: - params->rx_cq_moderation = - net_dim_get_def_rx_moderation( - NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE); - } + u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); + + params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode); + } else { + params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); } MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, @@ -4191,6 +4209,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode); mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index f292bb346985..900661d45c8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -44,6 +44,30 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) return cpumask_test_cpu(current_cpu, aff); } +static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) +{ + struct net_dim_sample dim_sample; + + if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_AM))) + return; + + net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes, + &dim_sample); + net_dim(&sq->dim, dim_sample); +} + +static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) +{ + struct net_dim_sample dim_sample; + + if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_AM))) + return; + + net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes, + &dim_sample); + net_dim(&rq->dim, dim_sample); +} + int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, @@ -75,18 +99,13 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (unlikely(!napi_complete_done(napi, work_done))) return work_done; - for (i = 0; i < c->num_tc; i++) + for (i = 0; i < c->num_tc; i++) { + 
mlx5e_handle_tx_dim(&c->sq[i]); mlx5e_cq_arm(&c->sq[i].cq); - - if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) { - struct net_dim_sample dim_sample; - net_dim_sample(c->rq.cq.event_ctr, - c->rq.stats.packets, - c->rq.stats.bytes, - &dim_sample); - net_dim(&c->rq.dim, dim_sample); } + mlx5e_handle_rx_dim(&c->rq); + mlx5e_cq_arm(&c->rq.cq); mlx5e_cq_arm(&c->icosq.cq); -- cgit v1.2.3 From fbb93020b89bc0270a9ca3c2479bbbbc548d9ee5 Mon Sep 17 00:00:00 2001 From: Dmitry Lebed Date: Mon, 26 Mar 2018 16:36:33 +0300 Subject: qtnfmac: add DFS offload support DFS offload support implemented: - DFS_OFFLOAD feature is advertised depending on HW capabilities - CAC_STARTED event forwarding from HW implemented - start_radar_detection() callback now returning -ENOTSUPP if DFS_OFFLOAD is enabled Signed-off-by: Dmitry Lebed Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 9 +++++++++ drivers/net/wireless/quantenna/qtnfmac/event.c | 11 +++++++++++ drivers/net/wireless/quantenna/qtnfmac/qlink.h | 7 +++++-- 3 files changed, 25 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 0398bece5782..5122dc798064 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -813,6 +813,9 @@ static int qtnf_start_radar_detection(struct wiphy *wiphy, struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); int ret; + if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) + return -ENOTSUPP; + ret = qtnf_cmd_start_cac(vif, chandef, cac_time_ms); if (ret) pr_err("%s: failed to start CAC ret=%d\n", ndev->name, ret); @@ -909,6 +912,9 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus) { struct wiphy *wiphy; + if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) + qtn_cfg80211_ops.start_radar_detection = NULL; + wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac)); if (!wiphy) return NULL; @@ -982,6 +988,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_CHANNEL_SWITCH; + if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD); + wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index bcd415f96412..cb2a6c12f870 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -443,6 +443,17 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif, cfg80211_cac_event(vif->netdev, &chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); break; + case QLINK_RADAR_CAC_STARTED: + if (vif->wdev.cac_started) + break; + + if (!wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_DFS_OFFLOAD)) + break; + + cfg80211_cac_event(vif->netdev, &chandef, + NL80211_RADAR_CAC_STARTED, GFP_KERNEL); + break; default: pr_warn("%s: unhandled radar event %u\n", vif->netdev->name, ev->event); diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 9bf3ae4d1b3b..9ab27e158023 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -68,10 +68,12 @@ struct qlink_msg_header { * @QLINK_HW_CAPAB_STA_INACT_TIMEOUT: device implements a logic to kick-out * 
associated STAs due to inactivity. Inactivity timeout period is taken * from QLINK_CMD_START_AP parameters. + * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality */ enum qlink_hw_capab { - QLINK_HW_CAPAB_REG_UPDATE = BIT(0), - QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1), + QLINK_HW_CAPAB_REG_UPDATE = BIT(0), + QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1), + QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2), }; enum qlink_iface_type { @@ -1031,6 +1033,7 @@ enum qlink_radar_event { QLINK_RADAR_CAC_ABORTED, QLINK_RADAR_NOP_FINISHED, QLINK_RADAR_PRE_CAC_EXPIRED, + QLINK_RADAR_CAC_STARTED, }; /** -- cgit v1.2.3 From 0fb1abc3a0b8f83b312b3c00e98643a17df69e87 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 29 Mar 2018 16:38:18 +0100 Subject: cw1200: fix spelling mistake: "Mailformed" -> "Malformed" Trivial fix to spelling mistake in wiphy_warn warning message text Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/st/cw1200/txrx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index e9050b41157a..f7b1b0062db3 100644 --- a/drivers/net/wireless/st/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c @@ -1069,7 +1069,7 @@ void cw1200_rx_cb(struct cw1200_common *priv, } if (skb->len < sizeof(struct ieee80211_pspoll)) { - wiphy_warn(priv->hw->wiphy, "Mailformed SDU rx'ed. Size is lesser than IEEE header.\n"); + wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is lesser than IEEE header.\n"); goto drop; } -- cgit v1.2.3 From 5dc3638735f1c8a6ccd7f6ea361546567deb5f0f Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Thu, 29 Mar 2018 19:44:49 +0530 Subject: rsi: move xtend_desc structure from rsi_main.h to rsi_mgmt.h All descriptor structures are in rsi_mgmt.h except this extended descriptor structure. Hence moving it to rsi_mgmt.h and also renaming to rsi_xtend_desc. 
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 12 ++++++------ drivers/net/wireless/rsi/rsi_main.h | 6 ------ drivers/net/wireless/rsi/rsi_mgmt.h | 6 ++++++ 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index de608ae365a4..ce6a86deea65 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -55,7 +55,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) struct rsi_mgmt_desc *mgmt_desc; struct skb_info *tx_params; struct ieee80211_bss_conf *bss = NULL; - struct xtended_desc *xtend_desc = NULL; + struct rsi_xtended_desc *xtend_desc = NULL; u8 header_size; u32 dword_align_bytes = 0; @@ -69,7 +69,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) vif = tx_params->vif; /* Update header size */ - header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc); if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Failed to add extended descriptor\n", @@ -92,7 +92,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) wh = (struct ieee80211_hdr *)&skb->data[header_size]; mgmt_desc = (struct rsi_mgmt_desc *)skb->data; - xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ]; rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); @@ -158,7 +158,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) struct skb_info *tx_params; struct ieee80211_bss_conf *bss; struct rsi_data_desc *data_desc; - struct xtended_desc *xtend_desc; + struct rsi_xtended_desc *xtend_desc; u8 ieee80211_size = MIN_802_11_HDR_LEN; u8 header_size; u8 vap_id = 0; @@ -170,7 +170,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) bss = &vif->bss_conf; tx_params = (struct skb_info *)info->driver_data; - header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc); if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); return -ENOSPC; @@ -188,7 +188,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) data_desc = (struct rsi_data_desc *)skb->data; memset(data_desc, 0, header_size); - xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ]; wh = (struct ieee80211_hdr *)&skb->data[header_size]; seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl)); diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index ef4fa323694b..b81cdbf05e5b 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -190,12 +190,6 @@ struct cqm_info { u32 rssi_hyst; }; -struct xtended_desc { - u8 confirm_frame_type; - u8 retry_cnt; - u16 reserved; -}; - enum rsi_dfs_regions { RSI_REGION_FCC = 0, RSI_REGION_ETSI, diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index cf6567ae5bbe..6726e84a6773 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -301,6 +301,12 @@ struct rsi_mac_frame { #define ENCAP_MGMT_PKT BIT(7) 
#define DESC_IMMEDIATE_WAKEUP BIT(15) +struct rsi_xtended_desc { + u8 confirm_frame_type; + u8 retry_cnt; + u16 reserved; +}; + struct rsi_cmd_desc_dword0 { __le16 len_qno; u8 frame_type; -- cgit v1.2.3 From 1be05eb5e4dc8e3812ae72bdd55e6fa4de3ee948 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Thu, 29 Mar 2018 19:44:50 +0530 Subject: rsi: move descriptor preparation to core Descriptors preparation is moved to core instead of HAL to avoid synchronization issues in sending TX frames Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_core.c | 11 +++++++++ drivers/net/wireless/rsi/rsi_91x_hal.c | 43 +++++++++++++++++---------------- drivers/net/wireless/rsi/rsi_hal.h | 2 ++ 3 files changed, 35 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 5dafd2e1306c..f5d17568685b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -413,6 +413,11 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) (ieee80211_is_qos_nullfunc(wh->frame_control))) { q_num = MGMT_SOFT_Q; skb->priority = q_num; + + if (rsi_prepare_mgmt_desc(common, skb)) { + rsi_dbg(ERR_ZONE, "Failed to prepare desc\n"); + goto xmit_fail; + } } else { if (ieee80211_is_data_qos(wh->frame_control)) { tid = (skb->data[24] & IEEE80211_QOS_TID); @@ -433,6 +438,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) if (!rsta) goto xmit_fail; tx_params->sta_id = rsta->sta_id; + } else { + tx_params->sta_id = 0; } if (rsta) { @@ -443,6 +450,10 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) tid, 0); } } + if (rsi_prepare_data_desc(common, skb)) { + rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n"); + goto xmit_fail; + } } if ((q_num < MGMT_SOFT_Q) && diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index ce6a86deea65..43d7d6c7ccb8 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -45,7 +45,7 @@ int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb) return status; } -static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) +int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; struct ieee80211_hdr *wh = NULL; @@ -113,17 +113,6 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) if (conf_is_ht40(conf)) mgmt_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); - if (ieee80211_is_probe_req(wh->frame_control)) { - if (!bss->assoc) { - rsi_dbg(INFO_ZONE, - "%s: blocking mgmt queue\n", __func__); - mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST; - xtend_desc->confirm_frame_type = PROBEREQ_CONFIRM; - common->mgmt_q_block = true; - rsi_dbg(INFO_ZONE, "Mgmt queue blocked\n"); - } - } - if (ieee80211_is_probe_resp(wh->frame_control)) { mgmt_desc->misc_flags |= (RSI_ADD_DELTA_TSF_VAP_ID | RSI_FETCH_RETRY_CNT_FRM_HST); @@ -149,7 +138,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) } /* This function prepares descriptor for given data packet */ -static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) +int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; struct ieee80211_vif *vif; @@ -301,10 +290,6 @@ int 
rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) (!bss->assoc)) goto err; - status = rsi_prepare_data_desc(common, skb); - if (status) - goto err; - status = rsi_send_pkt_to_bus(common, skb); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", __func__); @@ -327,12 +312,18 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; + struct ieee80211_bss_conf *bss; + struct ieee80211_hdr *wh; struct ieee80211_tx_info *info; struct skb_info *tx_params; + struct rsi_mgmt_desc *mgmt_desc; + struct rsi_xtended_desc *xtend_desc; int status = -E2BIG; + u8 header_size; info = IEEE80211_SKB_CB(skb); tx_params = (struct skb_info *)info->driver_data; + header_size = tx_params->internal_hdr_size; if (tx_params->flags & INTERNAL_MGMT_PKT) { status = adapter->host_intf_ops->write_pkt(common->priv, @@ -346,15 +337,25 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, return status; } - if (FRAME_DESC_SZ > skb_headroom(skb)) - goto err; + bss = &info->control.vif->bss_conf; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; + mgmt_desc = (struct rsi_mgmt_desc *)skb->data; + xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ]; + + /* Indicate to firmware to give cfm for probe */ + if (ieee80211_is_probe_req(wh->frame_control) && !bss->assoc) { + rsi_dbg(INFO_ZONE, + "%s: blocking mgmt queue\n", __func__); + mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST; + xtend_desc->confirm_frame_type = PROBEREQ_CONFIRM; + common->mgmt_q_block = true; + rsi_dbg(INFO_ZONE, "Mgmt queue blocked\n"); + } - rsi_prepare_mgmt_desc(common, skb); status = rsi_send_pkt_to_bus(common, skb); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__); -err: rsi_indicate_tx_status(common->priv, skb, status); return status; } diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 786dccd0b732..d6c2baa56e9f 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -167,6 +167,8 @@ struct rsi_bt_desc { } __packed; int rsi_hal_device_init(struct rsi_hw *adapter); +int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb); +int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb); int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb); int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb); int rsi_send_bt_pkt(struct rsi_common *common, struct sk_buff *skb); -- cgit v1.2.3 From 62dc108d5f7c4443ce88e1d6157e8547edbc07b3 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Thu, 29 Mar 2018 19:44:51 +0530 Subject: rsi: enable 80MHz clock by default 80MHz clock for device should be enabled by default in TX command frame radio capabilities. 
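For illustration only, and not part of the patch that follows: a minimal stand-alone sketch of the pattern the fix applies, where the 80 MHz LMAC clock flag is set for every channel width and the 40 MHz flag is OR-ed in on top. The two bit values below are placeholders; the real RSI_LMAC_CLOCK_80MHZ and RSI_ENABLE_40MHZ definitions live in the rsi driver headers.

#include <stdio.h>
#include <stdint.h>

/* placeholder bit positions, standing in for the driver's real flags */
#define LMAC_CLOCK_80MHZ	(1u << 0)
#define ENABLE_40MHZ		(1u << 3)

static uint16_t build_radio_cfg_info(int chan_width_40mhz)
{
	/* the 80 MHz clock is requested unconditionally ... */
	uint16_t cfg = LMAC_CLOCK_80MHZ;

	/* ... and the 40 MHz channel flag is OR-ed in only when needed,
	 * so 20 MHz operation no longer loses the clock setting */
	if (chan_width_40mhz)
		cfg |= ENABLE_40MHZ;

	return cfg;
}

int main(void)
{
	printf("20 MHz: radio_cfg_info = 0x%04x\n", build_radio_cfg_info(0));
	printf("40 MHz: radio_cfg_info = 0x%04x\n", build_radio_cfg_info(1));
	return 0;
}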
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index c21fca750fd4..9207da09a4f7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -325,8 +325,8 @@ static int rsi_load_radio_caps(struct rsi_common *common) radio_caps->channel_num = common->channel; radio_caps->rf_model = RSI_RF_TYPE; + radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ; if (common->channel_width == BW_40MHZ) { - radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ; radio_caps->radio_cfg_info |= RSI_ENABLE_40MHZ; if (common->fsm_state == FSM_MAC_INIT_DONE) { -- cgit v1.2.3 From 4fd6c4762f372852d65ad737ab35996395295dce Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Thu, 29 Mar 2018 19:44:52 +0530 Subject: rsi: roaming enhancements To support roaming below changes are done: * Station notify frame is send to firmware after sending assoc request. This will avoid dropping of first EAPOL frame due to delay in creation of station control block in firmware. * Data queues are unblocked after sending station notify in open mode, after configuring key in WEP mode, and after receiving EAPOL4 confirm in WPA mode. * Initial EAPOL frames priority is chaged to MGMT, rekey EAPOL frames priority changed to VO. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_core.c | 16 +++++++++++++++ drivers/net/wireless/rsi/rsi_91x_hal.c | 18 ++++++++++++++++ drivers/net/wireless/rsi/rsi_91x_mac80211.c | 18 +++++++++++++--- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 32 +++++++++++++++++------------ drivers/net/wireless/rsi/rsi_main.h | 1 + drivers/net/wireless/rsi/rsi_mgmt.h | 10 ++++++++- 6 files changed, 78 insertions(+), 17 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index f5d17568685b..3ca468b9f2b8 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -411,6 +411,18 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) if ((ieee80211_is_mgmt(wh->frame_control)) || (ieee80211_is_ctl(wh->frame_control)) || (ieee80211_is_qos_nullfunc(wh->frame_control))) { + if (ieee80211_is_assoc_req(wh->frame_control) || + ieee80211_is_reassoc_req(wh->frame_control)) { + struct ieee80211_bss_conf *bss = &vif->bss_conf; + + common->eapol4_confirm = false; + rsi_hal_send_sta_notify_frame(common, + RSI_IFTYPE_STATION, + STA_CONNECTED, bss->bssid, + bss->qos, bss->aid, 0, + vif); + } + q_num = MGMT_SOFT_Q; skb->priority = q_num; @@ -450,6 +462,10 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) tid, 0); } } + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + q_num = MGMT_SOFT_Q; + skb->priority = q_num; + } if (rsi_prepare_data_desc(common, skb)) { rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n"); goto xmit_fail; diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 43d7d6c7ccb8..b7c54037f369 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -232,6 +232,18 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) data_desc->misc_flags |= RSI_FETCH_RETRY_CNT_FRM_HST; #define 
EAPOL_RETRY_CNT 15 xtend_desc->retry_cnt = EAPOL_RETRY_CNT; + + if (common->eapol4_confirm) + skb->priority = VO_Q; + else + rsi_set_len_qno(&data_desc->len_qno, + (skb->len - FRAME_DESC_SZ), + RSI_WIFI_MGMT_Q); + if ((skb->len - header_size) == EAPOL4_PACKET_LEN) { + data_desc->misc_flags |= + RSI_DESC_REQUIRE_CFM_TO_HOST; + xtend_desc->confirm_frame_type = EAPOL4_CONFIRM; + } } data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff); @@ -271,8 +283,11 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) struct rsi_hw *adapter = common->priv; struct ieee80211_vif *vif; struct ieee80211_tx_info *info; + struct skb_info *tx_params; struct ieee80211_bss_conf *bss; + struct ieee80211_hdr *wh; int status = -EINVAL; + u8 header_size; if (!skb) return 0; @@ -284,6 +299,9 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) goto err; vif = info->control.vif; bss = &vif->bss_conf; + tx_params = (struct skb_info *)info->driver_data; + header_size = tx_params->internal_hdr_size; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; if (((vif->type == NL80211_IFTYPE_STATION) || (vif->type == NL80211_IFTYPE_P2P_CLIENT)) && diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 32f5cb46fd4f..5edc3a9d8eeb 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -737,7 +737,8 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, bss_conf->bssid, bss_conf->qos, bss_conf->aid, - NULL, 0, vif); + NULL, 0, + bss_conf->assoc_capability, vif); adapter->ps_info.dtim_interval_duration = bss->dtim_period; adapter->ps_info.listen_interval = conf->listen_interval; @@ -914,6 +915,17 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw, key->cipher, sta_id, vif); + if (status) + return status; + + if (vif->type == NL80211_IFTYPE_STATION && key->key && + (key->cipher == WLAN_CIPHER_SUITE_WEP104 || + key->cipher == WLAN_CIPHER_SUITE_WEP40)) { + if (!rsi_send_block_unblock_frame(adapter->priv, false)) + adapter->priv->hw_data_qs_blocked = false; + } + + return 0; } /** @@ -1391,7 +1403,7 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw, rsi_dbg(INFO_ZONE, "Indicate bss status to device\n"); rsi_inform_bss_status(common, RSI_OPMODE_AP, 1, sta->addr, sta->wme, sta->aid, - sta, sta_idx, vif); + sta, sta_idx, 0, vif); if (common->key) { struct ieee80211_key_conf *key = common->key; @@ -1469,7 +1481,7 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw, rsi_inform_bss_status(common, RSI_OPMODE_AP, 0, sta->addr, sta->wme, sta->aid, sta, sta_idx, - vif); + 0, vif); rsta->sta = NULL; rsta->sta_id = -1; for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 9207da09a4f7..0757adcb3f4a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -454,14 +454,10 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, * * Return: status: 0 on success, corresponding negative error code on failure. 
*/ -static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, - enum opmode opmode, - u8 notify_event, - const unsigned char *bssid, - u8 qos_enable, - u16 aid, - u16 sta_id, - struct ieee80211_vif *vif) +int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode, + u8 notify_event, const unsigned char *bssid, + u8 qos_enable, u16 aid, u16 sta_id, + struct ieee80211_vif *vif) { struct sk_buff *skb = NULL; struct rsi_peer_notify *peer_notify; @@ -1328,6 +1324,7 @@ void rsi_inform_bss_status(struct rsi_common *common, u16 aid, struct ieee80211_sta *sta, u16 sta_id, + u16 assoc_cap, struct ieee80211_vif *vif) { if (status) { @@ -1342,10 +1339,10 @@ void rsi_inform_bss_status(struct rsi_common *common, vif); if (common->min_rate == 0xffff) rsi_send_auto_rate_request(common, sta, sta_id, vif); - if (opmode == RSI_OPMODE_STA) { - if (!rsi_send_block_unblock_frame(common, false)) - common->hw_data_qs_blocked = false; - } + if (opmode == RSI_OPMODE_STA && + !(assoc_cap & WLAN_CAPABILITY_PRIVACY) && + !rsi_send_block_unblock_frame(common, false)) + common->hw_data_qs_blocked = false; } else { if (opmode == RSI_OPMODE_STA) common->hw_data_qs_blocked = true; @@ -1850,10 +1847,19 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg) __func__); return rsi_handle_card_ready(common, msg); case TX_STATUS_IND: - if (msg[15] == PROBEREQ_CONFIRM) { + switch (msg[RSI_TX_STATUS_TYPE]) { + case PROBEREQ_CONFIRM: common->mgmt_q_block = false; rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n", __func__); + break; + case EAPOL4_CONFIRM: + if (msg[RSI_TX_STATUS]) { + common->eapol4_confirm = true; + if (!rsi_send_block_unblock_frame(common, + false)) + common->hw_data_qs_blocked = false; + } } break; case BEACON_EVENT_IND: diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index b81cdbf05e5b..a084f224bb03 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -287,6 +287,7 @@ struct rsi_common { struct timer_list roc_timer; struct ieee80211_vif *roc_vif; + bool eapol4_confirm; void *bt_adapter; }; diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 6726e84a6773..5f946f31723f 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -33,6 +33,7 @@ #define WMM_SHORT_SLOT_TIME 9 #define SIFS_DURATION 16 +#define EAPOL4_PACKET_LEN 0x85 #define KEY_TYPE_CLEAR 0 #define RSI_PAIRWISE_KEY 1 #define RSI_GROUP_KEY 2 @@ -62,9 +63,12 @@ #define RX_DOT11_MGMT 0x02 #define TX_STATUS_IND 0x04 #define BEACON_EVENT_IND 0x08 +#define EAPOL4_CONFIRM 1 #define PROBEREQ_CONFIRM 2 #define CARD_READY_IND 0x00 #define SLEEP_NOTIFY_IND 0x06 +#define RSI_TX_STATUS_TYPE 15 +#define RSI_TX_STATUS 12 #define RSI_DELETE_PEER 0x0 #define RSI_ADD_PEER 0x1 @@ -660,10 +664,14 @@ int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel); int rsi_send_vap_dynamic_update(struct rsi_common *common); int rsi_send_block_unblock_frame(struct rsi_common *common, bool event); +int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode, + u8 notify_event, const unsigned char *bssid, + u8 qos_enable, u16 aid, u16 sta_id, + struct ieee80211_vif *vif); void rsi_inform_bss_status(struct rsi_common *common, enum opmode opmode, u8 status, const u8 *addr, u8 qos_enable, u16 aid, struct ieee80211_sta *sta, u16 sta_id, - struct ieee80211_vif *vif); + u16 assoc_cap, struct ieee80211_vif *vif); void rsi_indicate_pkt_to_os(struct rsi_common 
*common, struct sk_buff *skb); int rsi_mac80211_attach(struct rsi_common *common); void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb, -- cgit v1.2.3 From 350fcdb834576162ab55519ed86572c00ea786a7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 6 Apr 2018 11:37:17 +0300 Subject: rsi: remove unecessary PTR_ALIGN()s The issue here is that we allocate "data" and then set "data = PTR_ALIGN(data, 8);" and then we free the aligned pointer instead of the original pointer. kmalloc() pointers are already ARCH_SLAB_MINALIGN aligned which is 8 or more on everything except certain Xtensa variants. We decided that if the Xtensa people ever notice a bug here then we'll tell them the bug is on their side. ;) Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index d76e69c0beaa..8ef00582c6ea 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -660,8 +660,6 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, if (!data) return -ENOMEM; - data = PTR_ALIGN(data, 8); - ms_addr = (addr >> 16); status = rsi_sdio_master_access_msword(adapter, ms_addr); if (status < 0) { @@ -724,8 +722,6 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, if (!data_aligned) return -ENOMEM; - data_aligned = PTR_ALIGN(data_aligned, 8); - if (size == 2) { *data_aligned = ((data << 16) | (data & 0xFFFF)); } else if (size == 1) { -- cgit v1.2.3 From 16d3bb7b2f37b917b3c55e83d230a199f5c51864 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 10 Apr 2018 20:34:11 +0530 Subject: rsi: disable fw watchdog timer during reset Firmware's watchdog timer should be disabled as a part of reset sequence. This change fixes a firmware hang issue observed during stress tests. 
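For context, a condensed sketch (not buildable on its own, and not part of the patch) of the ordering the change establishes in the USB reset path; every identifier is taken from the hunk below, while the error messages and the remaining reset steps are trimmed:

/* sketch: quiesce the firmware watchdog before resetting the chip */
static int rsi_reset_card_ordering(struct rsi_hw *adapter)
{
	/* 1. ask the bootloader to disable the firmware watchdog so it
	 *    cannot expire while the chip is being reinitialised */
	if (rsi_usb_master_reg_write(adapter, SWBL_REGOUT,
				     RSI_FW_WDT_DISABLE_REQ,
				     RSI_COMMON_REG_SIZE) < 0)
		return -EIO;

	/* 2. only then run the pre-existing watch-dog timer writes */
	return usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1,
				  RSI_ULP_WRITE_2, 32);
}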
Signed-off-by: Amitkumar Karwar Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 7 +++++++ drivers/net/wireless/rsi/rsi_hal.h | 1 + drivers/net/wireless/rsi/rsi_usb.h | 1 + 3 files changed, 9 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 7b8bae313aa9..b065438f51b2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -687,6 +687,13 @@ static int rsi_reset_card(struct rsi_hw *adapter) */ msleep(100); + if (rsi_usb_master_reg_write(adapter, SWBL_REGOUT, + RSI_FW_WDT_DISABLE_REQ, + RSI_COMMON_REG_SIZE) < 0) { + rsi_dbg(ERR_ZONE, "Disabling firmware watchdog timer failed\n"); + goto fail; + } + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1, RSI_ULP_WRITE_2, 32); if (ret < 0) diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index d6c2baa56e9f..327638cdd30b 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -115,6 +115,7 @@ #define FW_FLASH_OFFSET 0x820 #define LMAC_VER_OFFSET (FW_FLASH_OFFSET + 0x200) #define MAX_DWORD_ALIGN_BYTES 64 +#define RSI_COMMON_REG_SIZE 2 struct bl_header { __le32 flags; diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index a88d59295a98..b6fe79f0a513 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -26,6 +26,7 @@ #define RSI_USB_READY_MAGIC_NUM 0xab #define FW_STATUS_REG 0x41050012 #define RSI_TA_HOLD_REG 0x22000844 +#define RSI_FW_WDT_DISABLE_REQ 0x69 #define USB_VENDOR_REGISTER_READ 0x15 #define USB_VENDOR_REGISTER_WRITE 0x16 -- cgit v1.2.3 From 8c1475bdfc3ae3b5ee46d7c026be5e4409e05ea2 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 10 Apr 2018 20:34:12 +0530 Subject: rsi: device bootup parameter configuration Some device configuration flags need to be enabled while sending 'bootup params' internal frame to firmware. This patch takes care of it. Signed-off-by: Amitkumar Karwar Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_boot_params.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h index 238ee96434ec..ad903b22440e 100644 --- a/drivers/net/wireless/rsi/rsi_boot_params.h +++ b/drivers/net/wireless/rsi/rsi_boot_params.h @@ -46,7 +46,8 @@ (((TA_PLL_M_VAL_20 + 1) * 40) / \ ((TA_PLL_N_VAL_20 + 1) * (TA_PLL_P_VAL_20 + 1))) #define VALID_20 \ - (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS) + (WIFI_TAPLL_CONFIGS | WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | \ + WIFI_SWITCH_CLK_CONFIGS | BOOTUP_MODE_INFO | CRYSTAL_GOOD_TIME) #define UMAC_CLK_40BW \ (((TA_PLL_M_VAL_40 + 1) * 40) / \ ((TA_PLL_N_VAL_40 + 1) * (TA_PLL_P_VAL_40 + 1))) -- cgit v1.2.3 From cbbfdd6c700f7c840df73199fac0c77a31b6d944 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 10 Apr 2018 20:34:13 +0530 Subject: rsi: use appropriate interface for power save configuration Power save request should be sent on station interface. Virtual interface which is connected should be preferred. 
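Illustration only (not part of the patch): a stand-alone model of the selection rule just described. The struct below is a simplified stand-in for the driver's per-interface state; the actual hunk further down checks vif->type and vif->bss_conf.assoc in the same way.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

enum vif_type { VIF_AP, VIF_STATION, VIF_P2P_CLIENT };

struct vif {
	const char *name;
	enum vif_type type;
	bool assoc;
};

/* prefer a station-type interface, and an associated one above all */
static const struct vif *pick_ps_vif(const struct vif *vifs, size_t n)
{
	const struct vif *sta_vif = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (vifs[i].type != VIF_STATION &&
		    vifs[i].type != VIF_P2P_CLIENT)
			continue;
		if (!sta_vif || vifs[i].assoc)
			sta_vif = &vifs[i];
	}
	return sta_vif;	/* NULL: no station vif, so skip power save */
}

int main(void)
{
	const struct vif vifs[] = {
		{ "ap0",  VIF_AP,      false },
		{ "sta0", VIF_STATION, false },
		{ "sta1", VIF_STATION, true  },
	};
	const struct vif *v = pick_ps_vif(vifs, 3);

	printf("power save vif: %s\n", v ? v->name : "(none)");
	return 0;
}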
This patch resolves device not entering power save problem in certain situations Signed-off-by: Amitkumar Karwar Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 5edc3a9d8eeb..77aa3bb357a0 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -614,7 +614,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, /* Power save parameters */ if (changed & IEEE80211_CONF_CHANGE_PS) { - struct ieee80211_vif *vif; + struct ieee80211_vif *vif, *sta_vif = NULL; unsigned long flags; int i, set_ps = 1; @@ -628,13 +628,17 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, set_ps = 0; break; } + if ((vif->type == NL80211_IFTYPE_STATION || + vif->type == NL80211_IFTYPE_P2P_CLIENT) && + (!sta_vif || vif->bss_conf.assoc)) + sta_vif = vif; } - if (set_ps) { + if (set_ps && sta_vif) { spin_lock_irqsave(&adapter->ps_lock, flags); if (conf->flags & IEEE80211_CONF_PS) - rsi_enable_ps(adapter, vif); + rsi_enable_ps(adapter, sta_vif); else - rsi_disable_ps(adapter, vif); + rsi_disable_ps(adapter, sta_vif); spin_unlock_irqrestore(&adapter->ps_lock, flags); } } -- cgit v1.2.3 From a55e50f0672ec16c944b7e4f1168476661ea25a0 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 10 Apr 2018 20:34:14 +0530 Subject: rsi: increase max supported aggregation subframes Maximum number of supported aggregation subframes has been increased to 8. This is the optimal number for the driver. Signed-off-by: Amitkumar Karwar Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 3 ++- drivers/net/wireless/rsi/rsi_mgmt.h | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 77aa3bb357a0..2a777435a735 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1955,7 +1955,8 @@ int rsi_mac80211_attach(struct rsi_common *common) hw->uapsd_queues = RSI_IEEE80211_UAPSD_QUEUES; hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; - hw->max_tx_aggregation_subframes = 6; + hw->max_tx_aggregation_subframes = RSI_MAX_TX_AGGR_FRMS; + hw->max_rx_aggregation_subframes = RSI_MAX_RX_AGGR_FRMS; rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ); hw->rate_control_algorithm = "AARF"; diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 5f946f31723f..14620935c925 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -225,6 +225,9 @@ #define RSI_WOW_DISCONNECT BIT(5) #endif +#define RSI_MAX_TX_AGGR_FRMS 8 +#define RSI_MAX_RX_AGGR_FRMS 8 + enum opmode { RSI_OPMODE_UNSUPPORTED = -1, RSI_OPMODE_AP = 0, -- cgit v1.2.3 From 3334306a086e6784ad32f395e5dcc9272ca0ce6e Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 10 Apr 2018 20:34:15 +0530 Subject: rsi: parse TID from data frame correctly Currently TID is extracted by checking at specific offset in data frame. This approach doesn't work for some of the frames. 
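Illustration only: why a fixed byte offset for the TID is fragile. In an 802.11 QoS data frame the QoS Control field, whose low bits carry the TID, sits immediately after the MAC header, so offset 24 is correct only for three-address frames; when both To-DS and From-DS are set a fourth address is present and the header grows to 30 bytes. The mac80211 helpers the patch switches to, ieee80211_get_qos_ctl() and IEEE80211_QOS_CTL_TID_MASK, hide exactly this calculation. The stand-alone sketch below ignores every other header variation.

#include <stdio.h>
#include <stdbool.h>

static unsigned int qos_ctl_offset(bool has_addr4)
{
	/* frame control (2) + duration (2) + addr1-3 (18) + seq ctrl (2) */
	unsigned int hdrlen = 24;

	if (has_addr4)
		hdrlen += 6;	/* addr4 on 4-address (WDS) data frames */

	return hdrlen;		/* QoS Control follows the MAC header */
}

int main(void)
{
	printf("3-address QoS data: TID byte at offset %u\n",
	       qos_ctl_offset(false));
	printf("4-address QoS data: TID byte at offset %u\n",
	       qos_ctl_offset(true));
	return 0;
}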
This patch uses mac80211 API and do it correctly Signed-off-by: Amitkumar Karwar Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 3ca468b9f2b8..1f1b97220d43 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -432,7 +432,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) } } else { if (ieee80211_is_data_qos(wh->frame_control)) { - tid = (skb->data[24] & IEEE80211_QOS_TID); + u8 *qos = ieee80211_get_qos_ctl(wh); + + tid = *qos & IEEE80211_QOS_CTL_TID_MASK; skb->priority = TID_TO_WME_AC(tid); } else { tid = IEEE80211_NONQOS_TID; -- cgit v1.2.3 From 1e9c410f26a2ec3201b0f10141275afcbdaf6b69 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 10 Apr 2018 20:34:16 +0530 Subject: rsi: enable power save by default for coex Power save is by default enabled for WLAN and BT coex mode. Signed-off-by: Amitkumar Karwar Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 2a777435a735..bb497bd63563 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -2008,6 +2008,9 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->iface_combinations = rsi_iface_combinations; wiphy->n_iface_combinations = ARRAY_SIZE(rsi_iface_combinations); + if (common->coex_mode > 1) + wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + status = ieee80211_register_hw(hw); if (status) return status; -- cgit v1.2.3 From f0b147b8f1d27c9fa543704b21c67a7a3723c7d2 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 10 Apr 2018 20:34:17 +0530 Subject: rsi: advertise 5GHz support based on device capability Currently 5GHz gets advertised even for the device which supports only 2.4Ghz band. 
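Illustration only (not part of the patch): the capability-gating rule in stand-alone form. A band table that is left NULL is treated as unsupported by the wiphy layer, so 5 GHz channels are never advertised for a 2.4 GHz-only chip; num_supp_bands stands in for the driver's firmware-reported band count.

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

struct band_info { const char *name; };

int main(void)
{
	static struct band_info sband_2g = { "2.4 GHz" };
	static struct band_info sband_5g = { "5 GHz" };
	struct band_info *bands[NUM_BANDS] = { NULL, NULL };
	unsigned int num_supp_bands = 1;	/* e.g. a 2.4 GHz-only device */
	unsigned int i;

	bands[BAND_2GHZ] = &sband_2g;		/* always supported */
	if (num_supp_bands > 1)			/* advertise 5 GHz only if present */
		bands[BAND_5GHZ] = &sband_5g;

	for (i = 0; i < NUM_BANDS; i++)
		printf("%s: %s\n", i == BAND_2GHZ ? "2.4 GHz" : "5 GHz",
		       bands[i] ? "advertised" : "not supported");
	return 0;
}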
This patch fixes the issue Signed-off-by: Amitkumar Karwar Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index bb497bd63563..766d874cc6e2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1957,8 +1957,6 @@ int rsi_mac80211_attach(struct rsi_common *common) hw->max_tx_aggregation_subframes = RSI_MAX_TX_AGGR_FRMS; hw->max_rx_aggregation_subframes = RSI_MAX_RX_AGGR_FRMS; - rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); - rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ); hw->rate_control_algorithm = "AARF"; SET_IEEE80211_PERM_ADDR(hw, common->mac_addr); @@ -1979,10 +1977,15 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->available_antennas_rx = 1; wiphy->available_antennas_tx = 1; + + rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); wiphy->bands[NL80211_BAND_2GHZ] = &adapter->sbands[NL80211_BAND_2GHZ]; - wiphy->bands[NL80211_BAND_5GHZ] = - &adapter->sbands[NL80211_BAND_5GHZ]; + if (common->num_supp_bands > 1) { + rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ); + wiphy->bands[NL80211_BAND_5GHZ] = + &adapter->sbands[NL80211_BAND_5GHZ]; + } /* AP Parameters */ wiphy->max_ap_assoc_sta = rsi_max_ap_stas[common->oper_mode - 1]; -- cgit v1.2.3 From f700546682a62a87a9615121a37ee7452dab4b76 Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Wed, 11 Apr 2018 12:13:31 +0530 Subject: rsi: fix nommu_map_sg overflow kernel panic Following overflow kernel panic is observed on some platforms while loading the driver. It is fixed if dynamically allocated memory is passed to SDIO instead of static one [ 927.513963] nommu_map_sg: overflow 17d54064ba7c+20 of device mask ffffffff [ 927.517712] Modules linked in: rsi_sdio(+) cmac bnep arc4 rsi_91x mac80211 cfg80211 btrsi rfcomm bluetooth ecdh_generic snd_soc_sst_bytcr_rt5660 [ 927.517861] CPU: 0 PID: 1624 Comm: insmod Tainted: G W 4.15.0-1000 #1 [ 927.517870] RIP: 0010:sdhci_send_command+0x5f0/0xa90 [sdhci] [ 927.517873] RSP: 0000:ffffac3fc064b6d8 EFLAGS: 00010086 [ 927.517895] Call Trace: [ 927.517908] ? __schedule+0x3cd/0x890 [ 927.517915] ? mod_timer+0x17b/0x3c0 [ 927.517922] sdhci_request+0x7c/0xf0 [sdhci] [ 927.517928] __mmc_start_request+0x5a/0x170 [ 927.517932] mmc_start_request+0x74/0x90 [ 927.517936] mmc_wait_for_req+0x87/0xe0 [ 927.517940] mmc_io_rw_extended+0x2fd/0x330 [ 927.517946] ? mmc_wait_data_done+0x30/0x30 [ 927.517951] sdio_io_rw_ext_helper+0x160/0x210 [ 927.517956] sdio_writesb+0x1d/0x20 [ 927.517966] rsi_sdio_write_register_multiple+0x68/0x110 [rsi_sdio] [ 927.517976] rsi_hal_device_init+0x357/0x910 [rsi_91x] [ 927.517983] ? rsi_hal_device_init+0x357/0x910 [rsi_91x] [ 927.517990] rsi_probe+0x2c6/0x450 [rsi_sdio] [ 927.517995] sdio_bus_probe+0xfc/0x110 [ 927.518000] driver_probe_device+0x2b3/0x490 [ 927.518005] __driver_attach+0xdf/0xf0 [ 927.518008] ? driver_probe_device+0x490/0x490 [ 927.518014] bus_for_each_dev+0x6c/0xc0 [ 927.518018] driver_attach+0x1e/0x20 [ 927.518021] bus_add_driver+0x1f4/0x270 [ 927.518028] ? rsi_sdio_ack_intr+0x50/0x50 [rsi_sdio] [ 927.518031] driver_register+0x60/0xe0 [ 927.518038] ? 
rsi_sdio_ack_intr+0x50/0x50 [rsi_sdio] [ 927.518041] sdio_register_driver+0x20/0x30 [ 927.518047] rsi_module_init+0x16/0x40 [rsi_sdio] Signed-off-by: Siva Rebbagondla Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 35 ++++++++++++++++++++------------- drivers/net/wireless/rsi/rsi_91x_sdio.c | 21 +++++++++++++------- drivers/net/wireless/rsi/rsi_sdio.h | 2 +- 3 files changed, 36 insertions(+), 22 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index b7c54037f369..0761e61591bd 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -635,28 +635,32 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, u32 content_size) { struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops; - struct bl_header bl_hdr; + struct bl_header *bl_hdr; u32 write_addr, write_len; int status; - bl_hdr.flags = 0; - bl_hdr.image_no = cpu_to_le32(adapter->priv->coex_mode); - bl_hdr.check_sum = cpu_to_le32( - *(u32 *)&flash_content[CHECK_SUM_OFFSET]); - bl_hdr.flash_start_address = cpu_to_le32( - *(u32 *)&flash_content[ADDR_OFFSET]); - bl_hdr.flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); + bl_hdr = kzalloc(sizeof(*bl_hdr), GFP_KERNEL); + if (!bl_hdr) + return -ENOMEM; + + bl_hdr->flags = 0; + bl_hdr->image_no = cpu_to_le32(adapter->priv->coex_mode); + bl_hdr->check_sum = + cpu_to_le32(*(u32 *)&flash_content[CHECK_SUM_OFFSET]); + bl_hdr->flash_start_address = + cpu_to_le32(*(u32 *)&flash_content[ADDR_OFFSET]); + bl_hdr->flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); write_len = sizeof(struct bl_header); if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) { write_addr = PING_BUFFER_ADDRESS; status = hif_ops->write_reg_multiple(adapter, write_addr, - (u8 *)&bl_hdr, write_len); + (u8 *)bl_hdr, write_len); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to load Version/CRC structure\n", __func__); - return status; + goto fail; } } else { write_addr = PING_BUFFER_ADDRESS >> 16; @@ -665,20 +669,23 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return status; + goto fail; } write_addr = RSI_SD_REQUEST_MASTER | (PING_BUFFER_ADDRESS & 0xFFFF); status = hif_ops->write_reg_multiple(adapter, write_addr, - (u8 *)&bl_hdr, write_len); + (u8 *)bl_hdr, write_len); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to load Version/CRC structure\n", __func__); - return status; + goto fail; } } - return 0; + status = 0; +fail: + kfree(bl_hdr); + return status; } static u32 read_flash_capacity(struct rsi_hw *adapter) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 8ef00582c6ea..f7f282098696 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1038,17 +1038,21 @@ static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, /*This function resets and re-initializes the chip.*/ static void rsi_reset_chip(struct rsi_hw *adapter) { - __le32 data; + u8 *data; u8 sdio_interrupt_status = 0; u8 request = 1; int ret; + data = kzalloc(sizeof(u32), GFP_KERNEL); + if (!data) + return; + rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n"); ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request); if (ret < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to write SDIO wakeup register\n", __func__); - return; + goto 
err; } msleep(20); ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER, @@ -1056,7 +1060,7 @@ static void rsi_reset_chip(struct rsi_hw *adapter) if (ret < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", __func__); - return; + goto err; } rsi_dbg(INFO_ZONE, "%s: Intr Status Register value = %d\n", __func__, sdio_interrupt_status); @@ -1066,17 +1070,17 @@ static void rsi_reset_chip(struct rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return; + goto err; } - data = TA_HOLD_THREAD_VALUE; + put_unaligned_le32(TA_HOLD_THREAD_VALUE, data); if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER, - (u8 *)&data, 4)) { + data, 4)) { rsi_dbg(ERR_ZONE, "%s: Unable to hold Thread-Arch processor threads\n", __func__); - return; + goto err; } /* This msleep will ensure Thread-Arch processor to go to hold @@ -1097,6 +1101,9 @@ static void rsi_reset_chip(struct rsi_hw *adapter) * read write operations to complete for chip reset. */ msleep(500); +err: + kfree(data); + return; } /** diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index ead8e7c4df3a..353dbdf31e75 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -87,7 +87,7 @@ enum sdio_interrupt_type { #define TA_SOFT_RST_CLR 0 #define TA_SOFT_RST_SET BIT(0) #define TA_PC_ZERO 0 -#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF) +#define TA_HOLD_THREAD_VALUE 0xF #define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF) #define TA_BASE_ADDR 0x2200 #define MISC_CFG_BASE_ADDR 0x4105 -- cgit v1.2.3 From 78e450719c702784e42af6da912d3692fd3da0cb Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Wed, 11 Apr 2018 12:13:32 +0530 Subject: rsi: Fix 'invalid vdd' warning in mmc While performing cleanup, driver is messing with card->ocr value by not masking rocr against ocr_avail. Below panic is observed with some of the SDIO host controllers due to this. Issue is resolved by reverting incorrect modifications to vdd. [ 927.423821] mmc1: Invalid vdd 0x1f [ 927.423925] Modules linked in: rsi_sdio(+) cmac bnep arc4 rsi_91x mac80211 cfg80211 btrsi rfcomm bluetooth ecdh_generic [ 927.424073] CPU: 0 PID: 1624 Comm: insmod Tainted: G W 4.15.0-1000-caracalla #1 [ 927.424075] Hardware name: Dell Inc. Edge Gateway 3003/ , BIOS 01.00.06 01/22/2018 [ 927.424082] RIP: 0010:sdhci_set_power_noreg+0xdd/0x190[sdhci] [ 927.424085] RSP: 0018:ffffac3fc064b930 EFLAGS: 00010282 [ 927.424107] Call Trace: [ 927.424118] sdhci_set_power+0x5a/0x60 [sdhci] [ 927.424125] sdhci_set_ios+0x360/0x3b0 [sdhci] [ 927.424133] mmc_set_initial_state+0x92/0x120 [ 927.424137] mmc_power_up.part.34+0x33/0x1d0 [ 927.424141] mmc_power_up+0x17/0x20 [ 927.424147] mmc_sdio_runtime_resume+0x2d/0x50 [ 927.424151] mmc_runtime_resume+0x17/0x20 [ 927.424156] __rpm_callback+0xc4/0x200 [ 927.424161] ? idr_alloc_cyclic+0x57/0xd0 [ 927.424165] ? mmc_runtime_suspend+0x20/0x20 [ 927.424169] rpm_callback+0x24/0x80 [ 927.424172] ? mmc_runtime_suspend+0x20/0x20 [ 927.424176] rpm_resume+0x4b3/0x6c0 [ 927.424181] __pm_runtime_resume+0x4e/0x80 [ 927.424188] driver_probe_device+0x41/0x490 [ 927.424192] __driver_attach+0xdf/0xf0 [ 927.424196] ? driver_probe_device+0x490/0x490 [ 927.424201] bus_for_each_dev+0x6c/0xc0 [ 927.424205] driver_attach+0x1e/0x20 [ 927.424209] bus_add_driver+0x1f4/0x270 [ 927.424217] ? rsi_sdio_ack_intr+0x50/0x50 [rsi_sdio] [ 927.424221] driver_register+0x60/0xe0 [ 927.424227] ? 
rsi_sdio_ack_intr+0x50/0x50 [rsi_sdio] [ 927.424231] sdio_register_driver+0x20/0x30 [ 927.424237] rsi_module_init+0x16/0x40 [rsi_sdio] Signed-off-by: Siva Rebbagondla Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index f7f282098696..416981d99229 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -170,7 +170,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) int err; struct mmc_card *card = pfunction->card; struct mmc_host *host = card->host; - s32 bit = (fls(host->ocr_avail) - 1); u8 cmd52_resp; u32 clock, resp, i; u16 rca; @@ -190,7 +189,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) msleep(20); /* Initialize the SDIO card */ - host->ios.vdd = bit; host->ios.chip_select = MMC_CS_DONTCARE; host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.power_mode = MMC_POWER_UP; -- cgit v1.2.3 From 8cf304a85378b54b8a86bf2ed130da56dd59b251 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 9 Apr 2018 15:51:31 +0800 Subject: rtlwifi: btcoex: remove identical statements within if-else branches Since the statements are identical, we can safely remove the statements. The commit 42e74946f016 ("rtlwifi: btcoexist: Fix if == else warnings in halbtc8821a1ant.c") had fixed the statements, but the commit c6821613e653 ("rtlwifi: btcoex: follow linux coding style") that converted coding style and upgraded btcoex didn't include the fix. Reported-by: Gustavo A. R. Silva Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8821a1ant.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 202597cf8915..b5d65872db84 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -1584,11 +1584,7 @@ static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist, /* tdma and coex table */ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - if (BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == - wifi_status) - btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); - else - btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist, @@ -1991,16 +1987,9 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) wifi_rssi_state = btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2, 30, 0); - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a1ant_limited_tx(btcoexist, - NORMAL_EXEC, 1, 1, - 0, 1); - } else { - btc8821a1ant_limited_tx(btcoexist, - NORMAL_EXEC, 1, 1, - 0, 1); - } + btc8821a1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, + 0, 1); } else { btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); -- cgit v1.2.3 From 5156b054bcdab19dc2681fb606011378e9dd37e2 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Fri, 30 Mar 2018 16:12:23 -0500 Subject: mt7601u: phy: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Acked-by: Jakub Kicinski Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt7601u/phy.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c index ca09a5d4305e..9a90f1f09fb0 100644 --- a/drivers/net/wireless/mediatek/mt7601u/phy.c +++ b/drivers/net/wireless/mediatek/mt7601u/phy.c @@ -795,6 +795,7 @@ mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate) switch (phy_mode) { case MT_PHY_TYPE_OFDM: tx_rate += 4; + /* fall through */ case MT_PHY_TYPE_CCK: reg = dev->rf_pa_mode[0]; break; -- cgit v1.2.3 From 2cb161094ef940f191cf60cd512783269572b4a7 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 3 Apr 2018 16:45:31 +0200 Subject: mt76x2: fix tssi initialization for 5GHz band Fix mcu initial configuration for tssi calibration on 5GHz band Fixes: 7bc04215a66b ("mt76: add driver code for MT76x2e") Signed-off-by: Lorenzo Bianconi Acked-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index fcc37eb7ce0b..f28c55746cea 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -180,7 +180,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) if (mt76x2_channel_silent(dev)) return false; - if (chan->band == NL80211_BAND_2GHZ) + if (chan->band == NL80211_BAND_5GHZ) flag |= BIT(0); if (mt76x2_ext_pa_enabled(dev, chan->band)) -- cgit v1.2.3 From 00dfae9a0ab7594b77d39725ebff677900695d70 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 3 Apr 2018 17:01:43 +0200 Subject: mt76x2: make mt76x2_mac_reset routine static Add static qualifier to mt76x2_mac_reset routine and remove the prototype from mt76x2_mac.h since it is used just in mt76x2_init.c Signed-off-by: Lorenzo Bianconi Acked-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 2 +- drivers/net/wireless/mediatek/mt76/mt76x2_mac.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 934c331d995e..359b105235b3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -228,7 +228,7 @@ mt76x2_init_beacon_offsets(struct mt76x2_dev *dev) mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); } -int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) +static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) { static const u8 null_addr[ETH_ALEN] = {}; const u8 *macaddr = dev->mt76.macaddr; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h index 8a8a25e32d5f..c048cd06df6b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h @@ -157,7 +157,6 @@ mt76x2_skb_tx_info(struct sk_buff *skb) return (void *) info->status.status_driver_data; } -int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard); int 
mt76x2_mac_start(struct mt76x2_dev *dev); void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force); void mt76x2_mac_resume(struct mt76x2_dev *dev); -- cgit v1.2.3 From 11b2a25f02b700027aa89ff27c807ec756df01f6 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 3 Apr 2018 21:52:48 +0200 Subject: mt76: stop tx queues from the driver callback instead of common code Allows the driver to control whether to send a BlockAckReq after waking up. MT76x2 needs this set to true, for MT7603 (to be submitted later) it needs to be false. Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mac80211.c | 6 ++---- drivers/net/wireless/mediatek/mt76/mt76x2_main.c | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 4f30cdcd2b53..962ed8234065 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -541,12 +541,10 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps) return; - if (ps) { + if (ps) set_bit(MT_WCID_FLAG_PS, &wcid->flags); - mt76_stop_tx_queues(dev, sta, true); - } else { + else clear_bit(MT_WCID_FLAG_PS, &wcid->flags); - } ieee80211_sta_ps_transition(sta, ps); dev->drv->sta_ps(dev, sta, ps); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index 73c127f92613..81c58f865c64 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -321,6 +321,7 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); int idx = msta->wcid.idx; + mt76_stop_tx_queues(&dev->mt76, sta, true); mt76x2_mac_wcid_set_drop(dev, idx, ps); } -- cgit v1.2.3 From 49149d3f1c6bfb314db1f386cf5067e5151ee0aa Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 3 Apr 2018 21:52:49 +0200 Subject: mt76: add missing VHT maximum A-MPDU length capability Signficantly improves throughput with some clients Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mac80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 962ed8234065..ec531ce50d01 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -213,7 +213,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, vht_cap->vht_supported = true; vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_RXSTBC_1 | - IEEE80211_VHT_CAP_SHORT_GI_80; + IEEE80211_VHT_CAP_SHORT_GI_80 | + (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT); return 0; } -- cgit v1.2.3 From 9f67c277a80f37a6594ae4ea3b78157773e1a23b Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 3 Apr 2018 21:52:50 +0200 Subject: mt76: toggle driver station powersave bit before notifying mac80211 Avoids race conditions from mac80211 enqueueing tx packets before the tx-drop bit is cleared Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mac80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c 
b/drivers/net/wireless/mediatek/mt76/mac80211.c index ec531ce50d01..eb49e0a6758c 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -547,8 +547,8 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) else clear_bit(MT_WCID_FLAG_PS, &wcid->flags); - ieee80211_sta_ps_transition(sta, ps); dev->drv->sta_ps(dev, sta, ps); + ieee80211_sta_ps_transition(sta, ps); } void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, -- cgit v1.2.3 From 80f28994f7d9c01e2af3d8584874cfc1904a7553 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 3 Apr 2018 21:52:51 +0200 Subject: mt76: rework tx power handling There were a number of issues in the existing tx power handling code: 1. The EEPROM target power for chain 0 refers to the actual output power after the channel and bandwidth delta have been added to the initial channel gain. This means the delta values should not be added/subtracted for the rate power calculation 2. When power is reduced significantly, the initial channel gain underflows very quickly, while the per-rate power offsets are high. This miscalculation causes effective tx power to be increased when it should actually be lowered. Fix this by trying to adjust the channel gain to the lowest power from the rate table. In case of under- or overflow, compensate by adding an appropriate delta to the rate power values. This makes power configuration more accurate on a wider range of configurable values Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 67 ++++++++++++++++--------- 1 file changed, 42 insertions(+), 25 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index f28c55746cea..a4280a9f6956 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -73,16 +73,6 @@ int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain) return rssi; } -static u8 -mt76x2_txpower_check(int value) -{ - if (value < 0) - return 0; - if (value > 0x2f) - return 0x2f; - return value; -} - static void mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset) { @@ -102,6 +92,26 @@ mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit) r->all[i] = limit; } +static int +mt76x2_get_min_rate_power(struct mt76_rate_power *r) +{ + int i; + s8 ret = 0; + + for (i = 0; i < sizeof(r->all); i++) { + if (!r->all[i]) + continue; + + if (ret) + ret = min(ret, r->all[i]); + else + ret = r->all[i]; + } + + return ret; +} + + void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) { enum nl80211_chan_width width = dev->mt76.chandef.width; @@ -109,6 +119,7 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) struct mt76x2_tx_power_info txp; int txp_0, txp_1, delta = 0; struct mt76_rate_power t = {}; + int base_power, gain; mt76x2_get_power_info(dev, &txp, chan); @@ -117,26 +128,32 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) else if (width == NL80211_CHAN_WIDTH_80) delta = txp.delta_bw80; - if (txp.target_power > dev->txpower_conf) - delta -= txp.target_power - dev->txpower_conf; - mt76x2_get_rate_power(dev, &t, chan); - mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power + - txp.chain[0].delta); + mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power); mt76x2_limit_rate_power(&t, dev->txpower_conf); dev->txpower_cur = mt76x2_get_max_rate_power(&t); - mt76x2_add_rate_power_offset(&t, 
-(txp.chain[0].target_power + - txp.chain[0].delta + delta)); - dev->target_power = txp.chain[0].target_power; - dev->target_power_delta[0] = txp.chain[0].delta + delta; - dev->target_power_delta[1] = txp.chain[1].delta + delta; - dev->rate_power = t; - txp_0 = mt76x2_txpower_check(txp.chain[0].target_power + - txp.chain[0].delta + delta); + base_power = mt76x2_get_min_rate_power(&t); + delta += base_power - txp.chain[0].target_power; + txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta; + txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta; + + gain = min(txp_0, txp_1); + if (gain < 0) { + base_power -= gain; + txp_0 -= gain; + txp_1 -= gain; + } else if (gain > 0x2f) { + base_power -= gain - 0x2f; + txp_0 = 0x2f; + txp_1 = 0x2f; + } - txp_1 = mt76x2_txpower_check(txp.chain[1].target_power + - txp.chain[1].delta + delta); + mt76x2_add_rate_power_offset(&t, -base_power); + dev->target_power = txp.chain[0].target_power; + dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power; + dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power; + dev->rate_power = t; mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0); mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1); -- cgit v1.2.3 From 07073a2768ac8ef1360f715a68c3f7b7d1f23850 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 3 Apr 2018 21:52:52 +0200 Subject: mt76: fix potential sleep in atomic context Use cancel_delayed_work instead of cancel_delayed_work_sync Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/agg-rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index fcb208d1f276..cbac42cb536c 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -273,7 +273,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) spin_unlock_bh(&tid->lock); - cancel_delayed_work_sync(&tid->reorder_work); + cancel_delayed_work(&tid->reorder_work); } void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno) -- cgit v1.2.3 From 18efed59fabc0d0f6dd9888b5f0e8102c8332685 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 3 Apr 2018 21:52:53 +0200 Subject: mt76: set RX_FLAG_DUP_VALIDATED for A-MPDU reordered packets Required for fast-rx and allows mac80211 to skip an unnnecessary check. 
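For context, and only as an illustration of the one-line change below: RX_FLAG_DUP_VALIDATED tells mac80211 that the driver has already taken care of duplicate detection for this frame, so mac80211 can skip its own check, which the commit notes is required for the fast-rx path. A condensed sketch of where the flag is raised (the surrounding reorder logic is omitted):

/* condensed sketch of the A-MPDU reorder entry point, not the full function */
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* ... wcid/tid lookup as in the existing function ... */

	/* the reorder buffer tracks sequence numbers for this session,
	 * so tell mac80211 that duplicate detection is handled here */
	status->flag |= RX_FLAG_DUP_VALIDATED;

	/* ... reordering continues as in the hunk below ... */
}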
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/agg-rx.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index cbac42cb536c..6657ec8928de 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -169,6 +169,7 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) if (!tid) return; + status->flag |= RX_FLAG_DUP_VALIDATED; spin_lock_bh(&tid->lock); if (tid->stopped) -- cgit v1.2.3 From 1af83148a4fc31858383e8ee867f5b9f9c2432cd Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 3 Apr 2018 21:52:54 +0200 Subject: mt76: check qos ack policy before reordering packets Do not attempt to reorder packets not part of a BA session Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/agg-rx.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 6657ec8928de..dbf4057d2d3e 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -147,12 +147,13 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) { struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct mt76_wcid *wcid = status->wcid; struct ieee80211_sta *sta; struct mt76_rx_tid *tid; bool sn_less; u16 seqno, head, size; - u8 idx; + u8 ackp, idx; __skb_queue_tail(frames, skb); @@ -165,6 +166,12 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) return; } + /* not part of a BA session */ + ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK; + if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && + ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) + return; + tid = rcu_dereference(wcid->aggr[status->tid]); if (!tid) return; -- cgit v1.2.3 From cf7b411dcea8849e1802bd1ac35249b33050afed Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 3 Apr 2018 22:30:19 +0200 Subject: mt76x2: remove unnecessary MT_TX_ALC_CFG_4 configuration Remove unnecessary MT_TX_ALC_CFG_4 configuration since the register value will be properly set according to the usage of an external power amplifier Signed-off-by: Lorenzo Bianconi Acked-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index a4280a9f6956..79c698ada818 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -274,7 +274,6 @@ mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band) mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400); mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476); } - mt76_wr(dev, MT_TX_ALC_CFG_4, 0); if (mt76x2_ext_pa_enabled(dev, band)) pa_mode_adj = 0x04000000; -- cgit v1.2.3 From bcb0f68ae2eda3f4e124aa25bd0f5ae7afaba33c Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 4 Apr 2018 10:23:46 +0200 Subject: mt76x2: fix tx_alc_enabled check Fix mt76x2_temp_tx_alc_enabled routine since in order to enable tx_alc 
temperature compensation it is necessary to take into account BIT(15) of MT_EE_TX_POWER_EXT_PA_5G eeprom info Fixes: 7bc04215a66b ("mt76: add driver code for MT76x2e") Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c | 6 +----- drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h | 6 ++++++ 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c index 5bb50027c1e8..95d5f7d888f0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c @@ -609,17 +609,13 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) memset(t, 0, sizeof(*t)); - val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1); - if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC)) + if (!mt76x2_temp_tx_alc_enabled(dev)) return -EINVAL; if (!mt76x2_ext_pa_enabled(dev, band)) return -EINVAL; val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8; - if (!(val & BIT(7))) - return -EINVAL; - t->temp_25_ref = val & 0x7f; if (band == NL80211_BAND_5GHZ) { slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h index d79122728dca..aa0b0c040375 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h @@ -159,6 +159,12 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev); static inline bool mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev) { + u16 val; + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); + if (!(val & BIT(15))) + return false; + return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) & MT_EE_NIC_CONF_1_TEMP_TX_ALC; } -- cgit v1.2.3 From b9e5d4feb4fc0ec0823e5da3bdea1c01d38f448e Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 4 Apr 2018 10:38:32 +0200 Subject: mt76x2: set default values in TX_ALC_CFG_{1, 2} for temperature compensation Initialize default values for temperature compensation in TX_ALC_CFG_{1,2} if tssi has been enabled Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 79c698ada818..038d1fe6c726 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -677,6 +677,14 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init, sizeof(dev->cal.agc_gain_cur)); + /* init default values for temp compensation */ + if (mt76x2_tssi_enabled(dev)) { + mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, + 0x38); + mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP, + 0x38); + } + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, MT_CALIBRATE_INTERVAL); -- cgit v1.2.3 From b305a6ab02475f52ef604b36e4cecd3bf4aa5eb7 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 16 Apr 2018 13:56:17 +0200 Subject: mt7601u: use EWMA to calculate avg_rssi avg_rssi is not calculated correctly as we do not divide result by 256 (mt76 sums avg_rssi1 and avg_rssi2 and divides by 512). However dividing by 256 will make avg_rssi almost the same as last rssi value - not really an average.
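(For reference, and not part of the patch: a stand-alone sketch of the exponentially weighted moving average the change below adopts. With weight_rcp = 4 each new sample contributes one quarter and the previous average keeps three quarters; the kernel helper DECLARE_EWMA(rssi, 10, 4) used in the diff implements the same recurrence in fixed point with 10 fractional bits on unsigned values, which is why the driver stores -rssi and negates the result when reading it back.)

#include <stdio.h>

int main(void)
{
	/* RSSI magnitudes in dBm: a -70 dBm link that jumps to -50 dBm */
	unsigned int samples[] = { 70, 70, 70, 50, 50, 50, 50, 50 };
	unsigned int avg = 0;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (!avg)
			avg = samples[i];	/* first sample seeds the average */
		else
			/* new = old - old/4 + sample/4, i.e. weight_rcp = 4 */
			avg = avg - avg / 4 + samples[i] / 4;

		printf("sample -%u dBm -> avg about -%u dBm\n",
		       samples[i], avg);
	}
	/* the integer division above truncates; the kernel macro avoids
	 * that by keeping 10 extra fractional bits internally */
	return 0;
}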
So use EWMA to calculate avg_rssi. I've chosen weight_rcp=4 to convergence quicker on signal strength changes. Signed-off-by: Stanislaw Gruszka Acked-by: Jakub Kicinski Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt7601u/mac.c | 4 ++-- drivers/net/wireless/mediatek/mt7601u/mt7601u.h | 5 ++++- drivers/net/wireless/mediatek/mt7601u/phy.c | 10 +++++++--- 3 files changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c index d55d7040a56d..148c36d3d2e5 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mac.c +++ b/drivers/net/wireless/mediatek/mt7601u/mac.c @@ -453,7 +453,7 @@ mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, { dev->bcn_freq_off = rxwi->freq_off; dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate); - dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8); + ewma_rssi_add(&dev->avg_rssi, -rssi); } static int @@ -503,7 +503,7 @@ u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb, if (mt7601u_rx_is_our_beacon(dev, data)) mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi); else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) - dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8); + ewma_rssi_add(&dev->avg_rssi, -rssi); spin_unlock_bh(&dev->con_mon_lock); return len; diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h index 9233744451a9..db317d8c1652 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h +++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h @@ -23,6 +23,7 @@ #include #include #include +#include #include "regs.h" @@ -138,6 +139,8 @@ enum { MT7601U_STATE_MORE_STATS, }; +DECLARE_EWMA(rssi, 10, 4); + /** * struct mt7601u_dev - adapter structure * @lock: protects @wcid->tx_rate. @@ -220,7 +223,7 @@ struct mt7601u_dev { s8 bcn_freq_off; u8 bcn_phy_mode; - int avg_rssi; /* starts at 0 and converges */ + struct ewma_rssi avg_rssi; u8 agc_save; diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c index 9a90f1f09fb0..9d2f9a776ef1 100644 --- a/drivers/net/wireless/mediatek/mt7601u/phy.c +++ b/drivers/net/wireless/mediatek/mt7601u/phy.c @@ -975,6 +975,7 @@ void mt7601u_agc_restore(struct mt7601u_dev *dev) static void mt7601u_agc_tune(struct mt7601u_dev *dev) { u8 val = mt7601u_agc_default(dev); + long avg_rssi; if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) return; @@ -984,9 +985,12 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev) * Rssi updates are only on beacons and U2M so should work... 
*/ spin_lock_bh(&dev->con_mon_lock); - if (dev->avg_rssi <= -70) + avg_rssi = ewma_rssi_read(&dev->avg_rssi); + WARN_ON_ONCE(avg_rssi == 0); + avg_rssi = -avg_rssi; + if (avg_rssi <= -70) val -= 0x20; - else if (dev->avg_rssi <= -60) + else if (avg_rssi <= -60) val -= 0x10; spin_unlock_bh(&dev->con_mon_lock); @@ -1102,7 +1106,7 @@ void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev, /* Start/stop collecting beacon data */ spin_lock_bh(&dev->con_mon_lock); ether_addr_copy(dev->ap_bssid, info->bssid); - dev->avg_rssi = 0; + ewma_rssi_init(&dev->avg_rssi); dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; spin_unlock_bh(&dev->con_mon_lock); -- cgit v1.2.3 From 2019c39a2d6081e09436254fba320708e8127691 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 16 Apr 2018 13:56:18 +0200 Subject: mt7601u: run calibration works after finishing scanning When finishing scanning we switch to operational channel sill with SCANNING flag. This mean that we never perform calibration works after scanning. To fix the problem queue calibration works on .sw_scan_complete() routine. Signed-off-by: Stanislaw Gruszka Acked-by: Jakub Kicinski Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt7601u/main.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c index 3c9ea40d9584..7b21016012c3 100644 --- a/drivers/net/wireless/mediatek/mt7601u/main.c +++ b/drivers/net/wireless/mediatek/mt7601u/main.c @@ -288,6 +288,12 @@ mt7601u_sw_scan_complete(struct ieee80211_hw *hw, mt7601u_agc_restore(dev); clear_bit(MT7601U_STATE_SCANNING, &dev->state); + + ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); + if (dev->freq_cal.enabled) + ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, + MT_FREQ_CAL_INIT_DELAY); } static int -- cgit v1.2.3 From cc6603aaeebf75751f5c3b79bc38ab62a0a55e7e Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 17 Apr 2018 01:05:59 +0200 Subject: mt76x2: fix TXD_INFO bitmask definition Despite this issue is not harmful since it is not actually used in mt76 driver, fix DMA txinfo bitmask definition Fixes: 7bc04215a66b ('mt76: add driver code for MT76x2e') Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_dma.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h index 47f79d83fcb4..e9d426bbf91a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h @@ -19,7 +19,7 @@ #include "dma.h" -#define MT_TXD_INFO_LEN GENMASK(13, 0) +#define MT_TXD_INFO_LEN GENMASK(15, 0) #define MT_TXD_INFO_NEXT_VLD BIT(16) #define MT_TXD_INFO_TX_BURST BIT(17) #define MT_TXD_INFO_80211 BIT(19) @@ -27,9 +27,8 @@ #define MT_TXD_INFO_CSO BIT(21) #define MT_TXD_INFO_WIV BIT(24) #define MT_TXD_INFO_QSEL GENMASK(26, 25) -#define MT_TXD_INFO_TCO BIT(29) -#define MT_TXD_INFO_UCO BIT(30) -#define MT_TXD_INFO_ICO BIT(31) +#define MT_TXD_INFO_DPORT GENMASK(29, 27) +#define MT_TXD_INFO_TYPE GENMASK(31, 30) #define MT_RX_FCE_INFO_LEN GENMASK(13, 0) #define MT_RX_FCE_INFO_SELF_GEN BIT(15) -- cgit v1.2.3 From c7d852e301d834949e570920db808201bdc51a22 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Tue, 24 Apr 2018 15:32:53 +0300 Subject: qed: Fix copying 2 strings The strscpy() was a recent fix (net: qed: use 
correct strncpy() size) to prevent passing the length of the source buffer to strncpy() and guarantee null termination. It misses the goal of overwriting only the first 3 characters in "???_BIG_RAM" and "???_RAM" while keeping the rest of the string. Use strncpy() with the length of 3, without null termination. Signed-off-by: Denis Bolotin Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_debug.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index b3211c7d38c2..39124b594a36 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -419,6 +419,7 @@ struct phy_defs { #define NUM_RSS_MEM_TYPES 5 #define NUM_BIG_RAM_TYPES 3 +#define BIG_RAM_NAME_LEN 3 #define NUM_PHY_TBUS_ADDRESSES 2048 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2) @@ -3650,8 +3651,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 : 128; - strscpy(type_name, big_ram->instance_name, sizeof(type_name)); - strscpy(mem_name, big_ram->instance_name, sizeof(mem_name)); + strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN); + strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN); /* Dump memory header */ offset += qed_grc_dump_mem_hdr(p_hwfn, -- cgit v1.2.3 From 080aaddae5b378ebb0f06d297bf05cf4ec5b47ac Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 24 Apr 2018 12:39:45 +0100 Subject: fsl/fman_port: remove redundant check on port->rev_info.major The check port->rev_info.major >= 6 is being performed twice, thus the inner second check is always true and is redundant, hence it can be removed. Detected by cppcheck. drivers/net/ethernet/freescale/fman/fman_port.c:1394]: (warning) Identical inner 'if' condition is always true. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/fman_port.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 6552d68ea6e1..ce6e24c74978 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1391,12 +1391,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata * workaround */ - if (port->rev_info.major >= 6) { - u32 reg; + u32 reg; - reg = 0x00001013; - iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp); - } + reg = 0x00001013; + iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp); } return 0; -- cgit v1.2.3 From 16f4faa4f06ff3b4e214922d55ac33ab6e2bdbdc Mon Sep 17 00:00:00 2001 From: Srinivas Jampala Date: Tue, 24 Apr 2018 10:23:27 -0700 Subject: liquidio: Swap VF representor Tx and Rx statistics Swap VF representor tx and rx interface statistics since it is a virtual switchdev port and tx for VM should be rx for VF representor and vice-versa. Signed-off-by: Srinivas Jampala Acked-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index 2adafa366d3f..ddd7431579f4 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -201,13 +201,14 @@ lio_vf_rep_get_stats64(struct net_device *dev, { struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); - stats64->tx_packets = vf_rep->stats.tx_packets; - stats64->tx_bytes = vf_rep->stats.tx_bytes; - stats64->tx_dropped = vf_rep->stats.tx_dropped; - - stats64->rx_packets = vf_rep->stats.rx_packets; - stats64->rx_bytes = vf_rep->stats.rx_bytes; - stats64->rx_dropped = vf_rep->stats.rx_dropped; + /* Swap tx and rx stats as VF rep is a switch port */ + stats64->tx_packets = vf_rep->stats.rx_packets; + stats64->tx_bytes = vf_rep->stats.rx_bytes; + stats64->tx_dropped = vf_rep->stats.rx_dropped; + + stats64->rx_packets = vf_rep->stats.tx_packets; + stats64->rx_bytes = vf_rep->stats.tx_bytes; + stats64->rx_dropped = vf_rep->stats.tx_dropped; } static int -- cgit v1.2.3 From 9c9e53233c281ed6c00425b165c4dd7fd0881cd5 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 24 Apr 2018 21:22:36 -0700 Subject: nfp: bpf: remove double space Whitespace cleanup - remove double space. Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 29b4e5f8c102..9cc638718272 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1400,7 +1400,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) if (!load_lm_ptr) return 0; - emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0); + emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0); wrp_nops(nfp_prog, 3); return 0; -- cgit v1.2.3 From 6c59500c2dbfae0e3c90854ef443948d2889495d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 24 Apr 2018 21:22:37 -0700 Subject: nfp: bpf: optimize add/sub of a negative constant NFP instruction set can fit small immediates into the instruction. Negative integers, however, will never fit because they will have highest bit set. If we swap the ALU op between ADD and SUB and negate the constant we have a better chance of fitting small negative integers into the instruction itself and saving one or two cycles. 
immed[gprB_21, 0xfffffffc] alu[gprA_4, gprA_4, +, gprB_21], gpr_wrboth immed[gprB_21, 0xffffffff] alu[gprA_5, gprA_5, +carry, gprB_21], gpr_wrboth now becomes: alu[gprA_4, gprA_4, -, 4], gpr_wrboth alu[gprA_5, gprA_5, -carry, 0], gpr_wrboth Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 35 ++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 9cc638718272..a5590988fc69 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -2777,6 +2777,40 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) } } +/* abs(insn.imm) will fit better into unrestricted reg immediate - + * convert add/sub of a negative number into a sub/add of a positive one. + */ +static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta; + + list_for_each_entry(meta, &nfp_prog->insns, l) { + struct bpf_insn insn = meta->insn; + + if (meta->skip) + continue; + + if (BPF_CLASS(insn.code) != BPF_ALU && + BPF_CLASS(insn.code) != BPF_ALU64) + continue; + if (BPF_SRC(insn.code) != BPF_K) + continue; + if (insn.imm >= 0) + continue; + + if (BPF_OP(insn.code) == BPF_ADD) + insn.code = BPF_CLASS(insn.code) | BPF_SUB; + else if (BPF_OP(insn.code) == BPF_SUB) + insn.code = BPF_CLASS(insn.code) | BPF_ADD; + else + continue; + + meta->insn.code = insn.code | BPF_K; + + meta->insn.imm = -insn.imm; + } +} + /* Remove masking after load since our load guarantees this is not needed */ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) { @@ -3212,6 +3246,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) { nfp_bpf_opt_reg_init(nfp_prog); + nfp_bpf_opt_neg_add_sub(nfp_prog); nfp_bpf_opt_ld_mask(nfp_prog); nfp_bpf_opt_ld_shift(nfp_prog); nfp_bpf_opt_ldst_gather(nfp_prog); -- cgit v1.2.3 From 61dd8f0007799e88d35624f63e24e98a978df9d9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 24 Apr 2018 21:22:38 -0700 Subject: nfp: bpf: tabularize generations of compare operations There are quite a few compare instructions now, use a table to translate BPF instruction code to NFP instruction parameters instead of parameterizing helpers. This saves LOC and makes future extensions easier. 
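As a rough worked example of the new lookup (the opcode values are the standard eBPF encoding, not something defined by this patch):

    BPF_JGT  = 0x20  ->  BPF_OP(code) >> 4 = 0x2  ->  jmp_code_map[2]  = { BR_BLO, true }
    BPF_JSLE = 0xd0  ->  BPF_OP(code) >> 4 = 0xd  ->  jmp_code_map[13] = { BR_BGE, true }

Entries that are never initialized stay zeroed, and a br_mask of 0 is BR_BEQ, which the table deliberately never uses; the !jmp_code_map[op].br_mask check relies on that to reject opcodes the table does not cover.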
Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 168 ++++++++++----------------- 1 file changed, 61 insertions(+), 107 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index a5590988fc69..5b8da7a67df4 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1214,45 +1214,79 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return 0; } -static int -wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - enum br_mask br_mask, bool swap) +static const struct jmp_code_map { + enum br_mask br_mask; + bool swap; +} jmp_code_map[] = { + [BPF_JGT >> 4] = { BR_BLO, true }, + [BPF_JGE >> 4] = { BR_BHS, false }, + [BPF_JLT >> 4] = { BR_BLO, false }, + [BPF_JLE >> 4] = { BR_BHS, true }, + [BPF_JSGT >> 4] = { BR_BLT, true }, + [BPF_JSGE >> 4] = { BR_BGE, false }, + [BPF_JSLT >> 4] = { BR_BLT, false }, + [BPF_JSLE >> 4] = { BR_BGE, true }, +}; + +static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta) +{ + unsigned int op; + + op = BPF_OP(meta->insn.code) >> 4; + /* br_mask of 0 is BR_BEQ which we don't use in jump code table */ + if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) || + !jmp_code_map[op].br_mask, + "no code found for jump instruction")) + return NULL; + + return &jmp_code_map[op]; +} + +static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ + const struct jmp_code_map *code; u8 reg = insn->dst_reg * 2; swreg tmp_reg; + code = nfp_jmp_code_get(meta); + if (!code) + return -EINVAL; + tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); - if (!swap) + if (!code->swap) emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg); else emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg)); tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); - if (!swap) + if (!code->swap) emit_alu(nfp_prog, reg_none(), reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg); else emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1)); - emit_br(nfp_prog, br_mask, insn->off, 0); + emit_br(nfp_prog, code->br_mask, insn->off, 0); return 0; } -static int -wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - enum br_mask br_mask, bool swap) +static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; + const struct jmp_code_map *code; u8 areg, breg; + code = nfp_jmp_code_get(meta); + if (!code) + return -EINVAL; + areg = insn->dst_reg * 2; breg = insn->src_reg * 2; - if (swap) { + if (code->swap) { areg ^= breg; breg ^= areg; areg ^= breg; @@ -1261,7 +1295,7 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg)); emit_alu(nfp_prog, reg_none(), reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1)); - emit_br(nfp_prog, br_mask, insn->off, 0); + emit_br(nfp_prog, code->br_mask, insn->off, 0); return 0; } @@ -2283,46 +2317,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } -static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true); -} - -static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BHS, 
false); -} - -static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false); -} - -static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true); -} - -static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true); -} - -static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false); -} - -static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false); -} - -static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true); -} - static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -2392,46 +2386,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } -static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true); -} - -static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false); -} - -static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false); -} - -static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); -} - -static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true); -} - -static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false); -} - -static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false); -} - -static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true); -} - static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); @@ -2520,25 +2474,25 @@ static const instr_cb_t instr_cb[256] = { [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, [BPF_JMP | BPF_JA | BPF_K] = jump, [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, - [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm, - [BPF_JMP | BPF_JGE | BPF_K] = jge_imm, - [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm, - [BPF_JMP | BPF_JLE | BPF_K] = jle_imm, - [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm, - [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm, - [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm, - [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm, + [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm, [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, - [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg, - [BPF_JMP | BPF_JGE | BPF_X] = jge_reg, - [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg, - [BPF_JMP | BPF_JLE | BPF_X] = jle_reg, - [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg, - [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg, - [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg, - [BPF_JMP | 
BPF_JSLE | BPF_X] = jsle_reg, + [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, [BPF_JMP | BPF_CALL] = call, -- cgit v1.2.3 From 7bdc97be9075c074be0f0aa9c59a8d2238224743 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 24 Apr 2018 21:22:39 -0700 Subject: nfp: bpf: optimize comparisons to negative constants Comparison instruction requires a subtraction. If the constant is negative we are more likely to fit it into a NFP instruction directly if we change the sign and use addition. Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 42 +++++++++++++++++++-------- drivers/net/ethernet/netronome/nfp/bpf/main.h | 6 +++- 2 files changed, 35 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 5b8da7a67df4..65f0791cae0c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1247,6 +1247,7 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ const struct jmp_code_map *code; + enum alu_op alu_op, carry_op; u8 reg = insn->dst_reg * 2; swreg tmp_reg; @@ -1254,19 +1255,22 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) if (!code) return -EINVAL; + alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB; + carry_op = meta->jump_neg_op ? 
ALU_OP_ADD_C : ALU_OP_SUB_C; + tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); if (!code->swap) - emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg); + emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg); else - emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg)); + emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg)); tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); if (!code->swap) emit_alu(nfp_prog, reg_none(), - reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg); + reg_a(reg + 1), carry_op, tmp_reg); else emit_alu(nfp_prog, reg_none(), - tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1)); + tmp_reg, carry_op, reg_a(reg + 1)); emit_br(nfp_prog, code->br_mask, insn->off, 0); @@ -2745,21 +2749,35 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) continue; if (BPF_CLASS(insn.code) != BPF_ALU && - BPF_CLASS(insn.code) != BPF_ALU64) + BPF_CLASS(insn.code) != BPF_ALU64 && + BPF_CLASS(insn.code) != BPF_JMP) continue; if (BPF_SRC(insn.code) != BPF_K) continue; if (insn.imm >= 0) continue; - if (BPF_OP(insn.code) == BPF_ADD) - insn.code = BPF_CLASS(insn.code) | BPF_SUB; - else if (BPF_OP(insn.code) == BPF_SUB) - insn.code = BPF_CLASS(insn.code) | BPF_ADD; - else - continue; + if (BPF_CLASS(insn.code) == BPF_JMP) { + switch (BPF_OP(insn.code)) { + case BPF_JGE: + case BPF_JSGE: + case BPF_JLT: + case BPF_JSLT: + meta->jump_neg_op = true; + break; + default: + continue; + } + } else { + if (BPF_OP(insn.code) == BPF_ADD) + insn.code = BPF_CLASS(insn.code) | BPF_SUB; + else if (BPF_OP(insn.code) == BPF_SUB) + insn.code = BPF_CLASS(insn.code) | BPF_ADD; + else + continue; - meta->insn.code = insn.code | BPF_K; + meta->insn.code = insn.code | BPF_K; + } meta->insn.imm = -insn.imm; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 4981c8944ca3..68b5d326483d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -236,6 +236,7 @@ struct nfp_bpf_reg_state { * @xadd_over_16bit: 16bit immediate is not guaranteed * @xadd_maybe_16bit: 16bit immediate is possible * @jmp_dst: destination info for jump instructions + * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB * @func_id: function id for call instructions * @arg1: arg1 for call instructions * @arg2: arg2 for call instructions @@ -264,7 +265,10 @@ struct nfp_insn_meta { bool xadd_maybe_16bit; }; /* jump */ - struct nfp_insn_meta *jmp_dst; + struct { + struct nfp_insn_meta *jmp_dst; + bool jump_neg_op; + }; /* function calls */ struct { u32 func_id; -- cgit v1.2.3 From d6c38be09af0c2805cb858e3a20a2b29b2c04735 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Mon, 2 Apr 2018 20:31:22 +0800 Subject: mwifiex: uap: filter duplicate ERP IE Firmware parse and attach ERP IE from bss configuration, do not set again from tail IE. 
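For context, a tail-IE filter of this shape typically walks the buffer one element at a time and drops the IDs the firmware builds from bss_config on its own. A minimal userspace sketch of that walk (placeholder names and simplified types, not mwifiex code; ERP Information is element ID 42):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Element IDs the firmware is assumed to attach itself. */
    static bool fw_owned_eid(uint8_t eid)
    {
            switch (eid) {
            case 1:         /* Supported Rates */
            case 7:         /* Country */
            case 42:        /* ERP Information - newly filtered here */
                    return true;
            default:
                    return false;
            }
    }

    /* Copy only the elements the firmware does not already provide. */
    static size_t filter_tail_ies(const uint8_t *tail, size_t len, uint8_t *out)
    {
            size_t in = 0, kept = 0;

            while (in + 2 <= len && in + 2 + tail[in + 1] <= len) {
                    uint8_t elen = tail[in + 1];

                    if (!fw_owned_eid(tail[in])) {
                            memcpy(out + kept, &tail[in], 2 + elen);
                            kept += 2 + elen;
                    }
                    in += 2 + elen;
            }
            return kept;
    }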
Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/ie.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index 922e3d69fd84..b10baacb51c9 100644 --- a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -349,6 +349,7 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, case WLAN_EID_SUPP_RATES: case WLAN_EID_COUNTRY: case WLAN_EID_PWR_CONSTRAINT: + case WLAN_EID_ERP_INFO: case WLAN_EID_EXT_SUPP_RATES: case WLAN_EID_HT_CAPABILITY: case WLAN_EID_HT_OPERATION: -- cgit v1.2.3 From c1003538bf6b28a4bae8e511672fa5abaabe683c Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Mon, 2 Apr 2018 20:31:23 +0800 Subject: mwifiex: uap: support cfg80211 ignore_broadcast_ssid=2 Firmware already support hidden ssid and keep ssid length, Open the capability in driver. Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 7f7e9de2db1c..4857b75e54a7 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1979,7 +1979,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, bss_cfg->bcast_ssid_ctl = 0; break; case NL80211_HIDDEN_SSID_ZERO_CONTENTS: - /* firmware doesn't support this type of hidden SSID */ + bss_cfg->bcast_ssid_ctl = 2; + break; default: kfree(bss_cfg); return -EINVAL; -- cgit v1.2.3 From 01eca2842874b9a85b7cd1e1b0e5b34a5d53a21f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 5 Apr 2018 14:17:19 +0300 Subject: mwifiex: pcie: tighten a check in mwifiex_pcie_process_event_ready() If "evt_len" is 1 then we try to memcpy() negative 3 bytes and it would cause memory corruption. Fixes: d930faee141b ("mwifiex: add support for Marvell pcie8766 chipset") Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 97a6199692ab..7538543d46fa 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -1881,7 +1881,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, EVENT, "info: Event length: %d\n", evt_len); - if ((evt_len > 0) && (evt_len < MAX_EVENT_SIZE)) + if (evt_len > MWIFIEX_EVENT_HEADER_LEN && + evt_len < MAX_EVENT_SIZE) memcpy(adapter->event_body, skb_cmd->data + MWIFIEX_EVENT_HEADER_LEN, evt_len - MWIFIEX_EVENT_HEADER_LEN); -- cgit v1.2.3 From 1f589e2510d5df1192dca7c089103a2cbd028101 Mon Sep 17 00:00:00 2001 From: Dan Haab Date: Tue, 3 Apr 2018 10:21:56 +0200 Subject: brcmfmac: add support for BCM4366E chipset BCM4366E is a wireless chipset with a BCM43664 ChipCommon. It's supported by the same firmware as 4366c0. 
Signed-off-by: Dan Haab [arend: rebase patch and remove unnecessary definition] Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 1 + drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 1 + drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 1 + 3 files changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 3b829fed8631..927d62b3d41b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -689,6 +689,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) case BRCM_CC_43525_CHIP_ID: case BRCM_CC_4365_CHIP_ID: case BRCM_CC_4366_CHIP_ID: + case BRCM_CC_43664_CHIP_ID: return 0x200000; case CY_CC_4373_CHIP_ID: return 0x160000; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 091c191ce259..6e25ff94dd07 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -75,6 +75,7 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C), BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B), BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C), + BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371), }; diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index 57544a3a3ce4..686f7a85a045 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -57,6 +57,7 @@ #define BRCM_CC_43602_CHIP_ID 43602 #define BRCM_CC_4365_CHIP_ID 0x4365 #define BRCM_CC_4366_CHIP_ID 0x4366 +#define BRCM_CC_43664_CHIP_ID 43664 #define BRCM_CC_4371_CHIP_ID 0x4371 #define CY_CC_4373_CHIP_ID 0x4373 -- cgit v1.2.3 From 863683cfbbfcd48ae88e2af880f52b91332f1f26 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 4 Apr 2018 19:09:44 -0500 Subject: brcmsmac: phy_lcn: remove duplicate code Remove and refactor some code in order to avoid having identical code for different branches. Notice that this piece of code hasn't been modified since 2011. Addresses-Coverity-ID: 1226756 ("Identical code for different branches") Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index 93d4cde0eb31..9d830d27b229 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c @@ -3388,13 +3388,8 @@ void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode) u8 phybw40; phybw40 = CHSPEC_IS40(pi->radio_chanspec); - if (LCNREV_LT(pi->pubpi.phy_rev, 2)) { - mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5); - mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9); - } else { - mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5); - mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9); - } + mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5); + mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9); if (phybw40 == 0) { mod_phy_reg((pi), 0x410, -- cgit v1.2.3 From 9d81ecde4dce37cd9e147c24c9bc0cd1d4c259d5 Mon Sep 17 00:00:00 2001 From: Xose Vazquez Perez Date: Thu, 5 Apr 2018 14:22:32 +0200 Subject: rt2x00: rt2800: add antenna diversity for RT5370G RT5370G has hardware RX antenna diversity like RT5390R. Based on DPO_RT5572_LinuxSTA_2.6.1.3_20121022.tar.bz2 manufacturer driver: https://d86o2zu8ugzlg.cloudfront.net/mediatek-craft/drivers/DPO_RT5572_LinuxSTA_2.6.1.3_20121022.tar.bz2 Cc: Stanislaw Gruszka Cc: Helmut Schaa Cc: Gertjan van Wingerde Cc: Jakub Kicinski Cc: wireless newbie Cc: Kalle Valo Cc: linux wireless ml Signed-off-by: Xose Vazquez Perez Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800.h | 1 + drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index 6a8c93fb6a43..7a133e94b1bf 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -94,6 +94,7 @@ #define REV_RT3390E 0x0211 #define REV_RT3593E 0x0211 #define REV_RT5390F 0x0502 +#define REV_RT5370G 0x0503 #define REV_RT5390R 0x1502 #define REV_RT5592C 0x0221 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 429d07b651dd..e827dc522580 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -6220,8 +6220,9 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); } - /* This chip has hardware antenna diversity*/ - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + /* These chips have hardware RX antenna diversity */ + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R) || + rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5370G)) { rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ @@ -8748,7 +8749,9 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00dev->default_ant.rx = ANTENNA_A; } - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + /* These chips have hardware RX antenna diversity */ + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R) || + rt2x00_rt_rev_gte(rt2x00dev, RT5390, 
REV_RT5370G)) { rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; /* Unused */ rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* Unused */ } -- cgit v1.2.3 From 3763770044640caeb1101cdea40697cc0814403c Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 5 Apr 2018 10:49:49 -0500 Subject: qtnfmac: pearl: pcie: fix memory leak in qtnf_fw_work_handler In case memory resources for fw were succesfully allocated, release them before jumping to fw_load_fail. Addresses-Coverity-ID: 1466092 ("Resource leak") Fixes: c3b2f7ca4186 ("qtnfmac: implement asynchronous firmware loading") Signed-off-by: Gustavo A. R. Silva Reviewed-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index f117904d9120..6c1e139bb8f7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -1185,6 +1185,10 @@ static void qtnf_fw_work_handler(struct work_struct *work) if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY, QTN_FW_DL_TIMEOUT_MS)) { pr_err("card is not ready\n"); + + if (!flashboot) + release_firmware(fw); + goto fw_load_fail; } -- cgit v1.2.3 From b056b83c06e1b01b7091ba81c5883038a0fc2f46 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 3 Apr 2018 17:15:58 -0400 Subject: ixgbe: Drop support for macvlan specific unicast lists Drop the code for handling macvlan specific unicast lists. It isn't needed since we don't take any efforts to maintain it when we bring the interface up and it takes the slow path anyway. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 31 --------------------------- 1 file changed, 31 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 51e7d82a5860..c8f59430a5a1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4900,36 +4900,6 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, return -ENOMEM; } -/** - * ixgbe_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device structure - * @vfn: pool to associate with unicast addresses - * - * Writes unicast address list to the RAR table. 
- * Returns: -ENOMEM on failure/insufficient address space - * 0 on no addresses written - * X on writing X addresses to the RAR table - **/ -static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int count = 0; - - /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn)) - return -ENOMEM; - - if (!netdev_uc_empty(netdev)) { - struct netdev_hw_addr *ha; - netdev_for_each_uc_addr(ha, netdev) { - ixgbe_del_mac_filter(adapter, ha->addr, vfn); - ixgbe_add_mac_filter(adapter, ha->addr, vfn); - count++; - } - } - return count; -} - static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -5328,7 +5298,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, vmolr |= IXGBE_VMOLR_ROMPE; hw->mac.ops.update_mc_addr_list(hw, dev); } - ixgbe_write_uc_addr_list(adapter->netdev, pool); IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); } -- cgit v1.2.3 From 7d775f63470c3b6ddf34c770c973293ab925a7bb Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 3 Apr 2018 17:16:03 -0400 Subject: macvlan: Rename fwd_priv to accel_priv and add accessor function This change renames the fwd_priv member to accel_priv as this more accurately reflects the actual purpose of this value. In addition I am adding an accessor which will allow us to further abstract this in the future if needed. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 10 ++++----- drivers/net/macvlan.c | 31 ++++++++++++++++----------- include/linux/if_macvlan.h | 8 ++++++- 3 files changed, 30 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c8f59430a5a1..0fbeb2f48711 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5398,10 +5398,9 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev, static int ixgbe_upper_dev_walk(struct net_device *upper, void *data) { if (netif_is_macvlan(upper)) { - struct macvlan_dev *dfwd = netdev_priv(upper); - struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper); - if (dfwd->fwd_priv) + if (vadapter) ixgbe_fwd_ring_up(upper, vadapter); } @@ -8983,13 +8982,12 @@ struct upper_walk_data { static int get_macvlan_queue(struct net_device *upper, void *_data) { if (netif_is_macvlan(upper)) { - struct macvlan_dev *dfwd = netdev_priv(upper); - struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper); struct upper_walk_data *data = _data; struct ixgbe_adapter *adapter = data->adapter; int ifindex = data->ifindex; - if (vadapter && vadapter->netdev->ifindex == ifindex) { + if (vadapter && upper->ifindex == ifindex) { data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; data->action = data->queue; return 1; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 725f4b4afc6d..7ddc94ff4109 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -559,9 +559,9 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, if (unlikely(netpoll_tx_running(dev))) return macvlan_netpoll_send_skb(vlan, skb); - if (vlan->fwd_priv) { + if (vlan->accel_priv) { 
skb->dev = vlan->lowerdev; - ret = dev_queue_xmit_accel(skb, vlan->fwd_priv); + ret = dev_queue_xmit_accel(skb, vlan->accel_priv); } else { ret = macvlan_queue_xmit(skb, dev); } @@ -613,16 +613,23 @@ static int macvlan_open(struct net_device *dev) goto hash_add; } + err = -EBUSY; + if (macvlan_addr_busy(vlan->port, dev->dev_addr)) + goto out; + + /* Attempt to populate accel_priv which is used to offload the L2 + * forwarding requests for unicast packets. + */ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { - vlan->fwd_priv = + vlan->accel_priv = lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); /* If we get a NULL pointer back, or if we get an error * then we should just fall through to the non accelerated path */ - if (IS_ERR_OR_NULL(vlan->fwd_priv)) { - vlan->fwd_priv = NULL; - } else + if (IS_ERR_OR_NULL(vlan->accel_priv)) + vlan->accel_priv = NULL; + else return 0; } @@ -655,10 +662,10 @@ clear_multi: del_unicast: dev_uc_del(lowerdev, dev->dev_addr); out: - if (vlan->fwd_priv) { + if (vlan->accel_priv) { lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, - vlan->fwd_priv); - vlan->fwd_priv = NULL; + vlan->accel_priv); + vlan->accel_priv = NULL; } return err; } @@ -668,10 +675,10 @@ static int macvlan_stop(struct net_device *dev) struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - if (vlan->fwd_priv) { + if (vlan->accel_priv) { lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, - vlan->fwd_priv); - vlan->fwd_priv = NULL; + vlan->accel_priv); + vlan->accel_priv = NULL; return 0; } diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 4cb7aeeafce0..c5106ce8273a 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -21,7 +21,7 @@ struct macvlan_dev { struct hlist_node hlist; struct macvlan_port *port; struct net_device *lowerdev; - void *fwd_priv; + void *accel_priv; struct vlan_pcpu_stats __percpu *pcpu_stats; DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); @@ -86,4 +86,10 @@ macvlan_dev_real_dev(const struct net_device *dev) } #endif +static inline void *macvlan_accel_priv(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->accel_priv; +} #endif /* _LINUX_IF_MACVLAN_H */ -- cgit v1.2.3 From 81d4e91cd599ed7fd378ca5463d6d9b05214b8b2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 3 Apr 2018 17:16:09 -0400 Subject: macvlan: Use software path for offloaded local, broadcast, and multicast traffic This change makes it so that we use a software path for packets that are going to be locally switched between two macvlan interfaces on the same device. In addition we resort to software replication of broadcast and multicast packets instead of offloading that to hardware. The general idea is that using the device for east/west traffic local to the system is extremely inefficient. We can only support up to whatever the PCIe limit is for any given device so this caps us at somewhere around 20G for devices supported by ixgbe. This is compounded even further when you take broadcast and multicast into account as a single 10G port can come to a crawl as a packet is replicated up to 60+ times in some cases. In order to get away from that I am implementing changes so that we handle broadcast/multicast replication and east/west local traffic all in software. 
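Rough arithmetic on the replication cost (illustrative figures, not measurements): with 60 offloaded macvlans every broadcast or multicast frame is delivered once per pool, so

    1 Gbit/s of broadcast/multicast ingress
    x 60 copies (one per offloaded macvlan)
    = 60 Gbit/s of delivery bandwidth on the host side of a 10G port

before any unicast traffic is counted. Replicating in the macvlan layer instead means the frame crosses the device once and is duplicated in software only for the interfaces that actually want it.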
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 4 +-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 39 ++++++-------------- drivers/net/macvlan.c | 47 +++++++++++-------------- 3 files changed, 33 insertions(+), 57 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 45793491d4ba..ee645bacfe93 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1254,7 +1254,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) glort = l2_accel->dglort + 1 + i; hw->mac.ops.update_xcast_mode(hw, glort, - FM10K_XCAST_MODE_MULTI); + FM10K_XCAST_MODE_NONE); fm10k_queue_mac_request(interface, glort, sdev->dev_addr, hw->mac.default_vid, true); @@ -1515,7 +1515,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, if (fm10k_host_mbx_ready(interface)) { hw->mac.ops.update_xcast_mode(hw, glort, - FM10K_XCAST_MODE_MULTI); + FM10K_XCAST_MODE_NONE); fm10k_queue_mac_request(interface, glort, sdev->dev_addr, hw->mac.default_vid, true); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0fbeb2f48711..4d4fa46a1e7c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4219,7 +4219,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 reg_offset, vf_shift; + u16 pool = adapter->num_rx_pools; + u32 reg_offset, vf_shift, vmolr; u32 gcr_ext, vmdctl; int i; @@ -4233,6 +4234,13 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) vmdctl |= IXGBE_VT_CTL_REPLEN; IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + vmolr = IXGBE_VMOLR_AUPE; + while (pool--) + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr); + vf_shift = VMDQ_P(0) % 32; reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; @@ -5279,28 +5287,6 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) spin_unlock(&adapter->fdir_perfect_lock); } -static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, - struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vmolr; - - /* No unicast promiscuous support for VMDQ devices. 
*/ - vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); - vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); - - /* clear the affected bit */ - vmolr &= ~IXGBE_VMOLR_MPE; - - if (dev->flags & IFF_ALLMULTI) { - vmolr |= IXGBE_VMOLR_MPE; - } else { - vmolr |= IXGBE_VMOLR_ROMPE; - hw->mac.ops.update_mc_addr_list(hw, dev); - } - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); -} - /** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from @@ -5384,10 +5370,8 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev, */ err = ixgbe_add_mac_filter(adapter, vdev->dev_addr, VMDQ_P(accel->pool)); - if (err >= 0) { - ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); + if (err >= 0) return 0; - } for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->rx_ring[baseq + i]->netdev = NULL; @@ -9832,9 +9816,6 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr, VMDQ_P(accel->pool)); - /* disable ability to receive packets for this pool */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0); - /* Allow remaining Rx packets to get flushed out of the * Rx FIFO before we drop the netdev for the ring. */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 7ddc94ff4109..adde8fc45588 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -514,6 +514,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; + void *accel_priv = NULL; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; @@ -533,9 +534,14 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) } } + /* For packets that are non-multicast and not bridged we will pass + * the necessary information so that the lowerdev can distinguish + * the source of the packets via the accel_priv value. + */ + accel_priv = vlan->accel_priv; xmit_world: skb->dev = vlan->lowerdev; - return dev_queue_xmit(skb); + return dev_queue_xmit_accel(skb, accel_priv); } static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb) @@ -552,19 +558,14 @@ static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, str static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev) { + struct macvlan_dev *vlan = netdev_priv(dev); unsigned int len = skb->len; int ret; - struct macvlan_dev *vlan = netdev_priv(dev); if (unlikely(netpoll_tx_running(dev))) return macvlan_netpoll_send_skb(vlan, skb); - if (vlan->accel_priv) { - skb->dev = vlan->lowerdev; - ret = dev_queue_xmit_accel(skb, vlan->accel_priv); - } else { - ret = macvlan_queue_xmit(skb, dev); - } + ret = macvlan_queue_xmit(skb, dev); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *pcpu_stats; @@ -620,26 +621,20 @@ static int macvlan_open(struct net_device *dev) /* Attempt to populate accel_priv which is used to offload the L2 * forwarding requests for unicast packets. 
*/ - if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { + if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) vlan->accel_priv = lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); - /* If we get a NULL pointer back, or if we get an error - * then we should just fall through to the non accelerated path - */ - if (IS_ERR_OR_NULL(vlan->accel_priv)) - vlan->accel_priv = NULL; - else - return 0; + /* If earlier attempt to offload failed, or accel_priv is not + * populated we must add the unicast address to the lower device. + */ + if (IS_ERR_OR_NULL(vlan->accel_priv)) { + vlan->accel_priv = NULL; + err = dev_uc_add(lowerdev, dev->dev_addr); + if (err < 0) + goto out; } - err = -EBUSY; - if (macvlan_addr_busy(vlan->port, dev->dev_addr)) - goto out; - - err = dev_uc_add(lowerdev, dev->dev_addr); - if (err < 0) - goto out; if (dev->flags & IFF_ALLMULTI) { err = dev_set_allmulti(lowerdev, 1); if (err < 0) @@ -660,13 +655,14 @@ clear_multi: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); del_unicast: - dev_uc_del(lowerdev, dev->dev_addr); -out: if (vlan->accel_priv) { lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, vlan->accel_priv); vlan->accel_priv = NULL; + } else { + dev_uc_del(lowerdev, dev->dev_addr); } +out: return err; } @@ -679,7 +675,6 @@ static int macvlan_stop(struct net_device *dev) lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, vlan->accel_priv); vlan->accel_priv = NULL; - return 0; } dev_uc_unsync(lowerdev, dev); -- cgit v1.2.3 From 8d80ac43dea3877a945d15dc09a6c6efd4dc27a3 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 3 Apr 2018 17:16:14 -0400 Subject: ixgbe/fm10k: Drop tracking stats for macvlan broadcast/multicast Drop dead code now that we shouldn't be receiving broadcast or multicast frames on the queues associated to the macvlan netdev. 
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 7 +++---- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 +++---- 2 files changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index df8607097e4a..c51d61f5f715 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -445,15 +445,14 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring, l2_accel = NULL; } - skb->protocol = eth_type_trans(skb, dev); - /* Record Rx queue, or update macvlan statistics */ if (!l2_accel) skb_record_rx_queue(skb, rx_ring->queue_index); else macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, - (skb->pkt_type == PACKET_BROADCAST) || - (skb->pkt_type == PACKET_MULTICAST)); + false); + + skb->protocol = eth_type_trans(skb, dev); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4d4fa46a1e7c..2c648214148e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1768,15 +1768,14 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) ixgbe_ipsec_rx(rx_ring, rx_desc, skb); - skb->protocol = eth_type_trans(skb, dev); - /* record Rx queue, or update MACVLAN statistics */ if (netif_is_ixgbe(dev)) skb_record_rx_queue(skb, rx_ring->queue_index); else macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, - (skb->pkt_type == PACKET_BROADCAST) || - (skb->pkt_type == PACKET_MULTICAST)); + false); + + skb->protocol = eth_type_trans(skb, dev); } static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, -- cgit v1.2.3 From 3335915d07f74e6391ce33959e2f25cab273bf73 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 3 Apr 2018 17:16:35 -0400 Subject: ixgbe/fm10k: Only support macvlan offload for types that support destination filtering Both the ixgbe and fm10k drivers support destination filtering. Instead of adding a ton of complexity to support either source or passthru mode we can instead just avoid offloading them for now. Doing this we avoid leaking packets into interfaces that aren't meant to receive them. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 8 ++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 +++++++ 2 files changed, 15 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index ee645bacfe93..26e749766337 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -22,6 +22,7 @@ #include "fm10k.h" #include #include +#include /** * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) @@ -1449,6 +1450,13 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, int size = 0, i; u16 glort; + /* The hardware supported by fm10k only filters on the destination MAC + * address. In order to avoid issues we only support offloading modes + * where the hardware can actually provide the functionality. 
+ */ + if (!macvlan_supports_dest_filter(sdev)) + return ERR_PTR(-EMEDIUMTYPE); + /* allocate l2 accel structure if it is not available */ if (!l2_accel) { /* verify there is enough free GLORTs to support l2_accel */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2c648214148e..76f54771fdb2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9760,6 +9760,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) unsigned int limit; int pool, err; + /* The hardware supported by ixgbe only filters on the destination MAC + * address. In order to avoid issues we only support offloading modes + * where the hardware can actually provide the functionality. + */ + if (!macvlan_supports_dest_filter(vdev)) + return ERR_PTR(-EMEDIUMTYPE); + /* Hardware has a limited number of available pools. Each VF, and the * PF require a pool. Check to ensure we don't attempt to use more * then the available number of pools. -- cgit v1.2.3 From 865255b5a2e5a905a1cd8ff8444604af61ee79d8 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 3 Apr 2018 17:16:40 -0400 Subject: ixgbe: Drop real_adapter from l2 fwd acceleration structure This patch drops the real_adapter member from the fwd_adapter structure. The general idea behind the change is that the real_adapter is carrying unnecessary data since we could always just grab the adapter structure from netdev_priv(macvlan->lowerdev) if we really needed to get at it. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 28 +++++++++++++++------------ 2 files changed, 16 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 7dd5038cfcc4..8fccca57cd6a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -305,7 +305,6 @@ enum ixgbe_ring_state_t { struct ixgbe_fwd_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct net_device *netdev; - struct ixgbe_adapter *real_adapter; unsigned int tx_base_queue; unsigned int rx_base_queue; int pool; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 76f54771fdb2..f00720582e52 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5338,10 +5338,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) rx_ring->next_to_use = 0; } -static int ixgbe_fwd_ring_up(struct net_device *vdev, +static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, struct ixgbe_fwd_adapter *accel) { - struct ixgbe_adapter *adapter = accel->real_adapter; + struct net_device *vdev = accel->netdev; int i, baseq, err; if (!test_bit(accel->pool, adapter->fwd_bitmask)) @@ -5378,14 +5378,19 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev, return err; } -static int ixgbe_upper_dev_walk(struct net_device *upper, void *data) +static int ixgbe_macvlan_up(struct net_device *vdev, void *data) { - if (netif_is_macvlan(upper)) { - struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper); + struct ixgbe_adapter *adapter = data; + struct ixgbe_fwd_adapter *accel; - if (vadapter) - ixgbe_fwd_ring_up(upper, vadapter); - } + if (!netif_is_macvlan(vdev)) + return 0; + + accel = 
macvlan_accel_priv(vdev); + if (!accel) + return 0; + + ixgbe_fwd_ring_up(adapter, accel); return 0; } @@ -5393,7 +5398,7 @@ static int ixgbe_upper_dev_walk(struct net_device *upper, void *data) static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) { netdev_walk_all_upper_dev_rcu(adapter->netdev, - ixgbe_upper_dev_walk, NULL); + ixgbe_macvlan_up, adapter); } static void ixgbe_configure(struct ixgbe_adapter *adapter) @@ -9792,13 +9797,12 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; fwd_adapter->pool = pool; - fwd_adapter->real_adapter = adapter; /* Force reinit of ring allocation with VMDQ enabled */ err = ixgbe_setup_tc(pdev, adapter->hw_tcs); if (!err && netif_running(pdev)) - err = ixgbe_fwd_ring_up(vdev, fwd_adapter); + err = ixgbe_fwd_ring_up(adapter, fwd_adapter); if (!err) return fwd_adapter; @@ -9814,7 +9818,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) static void ixgbe_fwd_del(struct net_device *pdev, void *priv) { struct ixgbe_fwd_adapter *accel = priv; - struct ixgbe_adapter *adapter = accel->real_adapter; + struct ixgbe_adapter *adapter = netdev_priv(pdev); unsigned int rxbase = accel->rx_base_queue; unsigned int limit, i; -- cgit v1.2.3 From 8315ef6f395cc4e14171c59e1176711976deeb84 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 3 Apr 2018 17:16:45 -0400 Subject: ixgbe: Avoid performing unnecessary resets for macvlan offload The original implementation for macvlan offload has us performing a full port reset every time we added a new macvlan. This shouldn't be necessary and can be avoided with a few behavior changes. This patches updates the logic for the queues so that we have essentially 3 possible configurations for macvlan offload. They consist of 15 macvlans with 4 queues per macvlan, 31 macvlans with 2 queues per macvlan, and 63 macvlans with 1 queue per macvlan. As macvlans are added you will encounter up to 3 total resets if you add all the way up to 63, and after that the device will stay in the mode supporting up to 63 macvlans until the L2FW flag is cleared. 
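As a rough usage sketch (interface names purely illustrative), the path above is exercised by enabling the L2 forwarding offload feature and attaching macvlans to the ixgbe port; no explicit queue setup is needed:

$ ethtool -K eth0 l2-fwd-offload on
$ ip link add link eth0 name macvlan0 type macvlan mode bridge
$ ip link set macvlan0 up

Each macvlan added this way claims a pool in ixgbe_fwd_add(), and a reset is only triggered when the driver has to grow the reserved pool count past one of the 16/32/64 pool steps visible in the hunks below.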
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 192 +++++++++++++++++-------- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 5 +- 2 files changed, 135 insertions(+), 62 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f00720582e52..b6e5cea84949 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5344,15 +5344,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, struct net_device *vdev = accel->netdev; int i, baseq, err; - if (!test_bit(accel->pool, adapter->fwd_bitmask)) - return 0; - baseq = accel->pool * adapter->num_rx_queues_per_pool; netdev_dbg(vdev, "pool %i:%i queues %i:%i\n", accel->pool, adapter->num_rx_pools, baseq, baseq + adapter->num_rx_queues_per_pool); - accel->netdev = vdev; accel->rx_base_queue = baseq; accel->tx_base_queue = baseq; @@ -5372,9 +5368,17 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, if (err >= 0) return 0; + /* if we cannot add the MAC rule then disable the offload */ + macvlan_release_l2fw_offload(vdev); + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->rx_ring[baseq + i]->netdev = NULL; + netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n"); + + clear_bit(accel->pool, adapter->fwd_bitmask); + kfree(accel); + return err; } @@ -8799,6 +8803,49 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) } #endif /* CONFIG_IXGBE_DCB */ +static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_fwd_adapter *accel; + int pool; + + /* we only care about macvlans... */ + if (!netif_is_macvlan(vdev)) + return 0; + + /* that have hardware offload enabled... 
*/ + accel = macvlan_accel_priv(vdev); + if (!accel) + return 0; + + /* If we can relocate to a different bit do so */ + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); + if (pool < adapter->num_rx_pools) { + set_bit(pool, adapter->fwd_bitmask); + accel->pool = pool; + return 0; + } + + /* if we cannot find a free pool then disable the offload */ + netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n"); + macvlan_release_l2fw_offload(vdev); + kfree(accel); + + return 0; +} + +static void ixgbe_defrag_macvlan_pools(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + /* flush any stale bits out of the fwd bitmask */ + bitmap_clear(adapter->fwd_bitmask, 1, 63); + + /* walk through upper devices reassigning pools */ + netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool, + adapter); +} + /** * ixgbe_setup_tc - configure net_device for multiple traffic classes * @@ -8866,6 +8913,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) #endif /* CONFIG_IXGBE_DCB */ ixgbe_init_interrupt_scheme(adapter); + ixgbe_defrag_macvlan_pools(dev); + if (netif_running(dev)) return ixgbe_open(dev); @@ -9415,6 +9464,22 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, return features; } +static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter) +{ + int rss = min_t(int, ixgbe_max_rss_indices(adapter), + num_online_cpus()); + + /* go back to full RSS if we're not running SR-IOV */ + if (!adapter->ring_feature[RING_F_VMDQ].offset) + adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED); + + adapter->ring_feature[RING_F_RSS].limit = rss; + adapter->ring_feature[RING_F_VMDQ].limit = 1; + + ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs); +} + static int ixgbe_set_features(struct net_device *netdev, netdev_features_t features) { @@ -9495,7 +9560,9 @@ static int ixgbe_set_features(struct net_device *netdev, } } - if (need_reset) + if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1) + ixgbe_reset_l2fw_offload(adapter); + else if (need_reset) ixgbe_do_reset(netdev); else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) @@ -9758,11 +9825,9 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) { - struct ixgbe_fwd_adapter *fwd_adapter = NULL; struct ixgbe_adapter *adapter = netdev_priv(pdev); - int used_pools = adapter->num_vfs + adapter->num_rx_pools; + struct ixgbe_fwd_adapter *accel; int tcs = adapter->hw_tcs ? : 1; - unsigned int limit; int pool, err; /* The hardware supported by ixgbe only filters on the destination MAC @@ -9772,47 +9837,73 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) if (!macvlan_supports_dest_filter(vdev)) return ERR_PTR(-EMEDIUMTYPE); - /* Hardware has a limited number of available pools. Each VF, and the - * PF require a pool. Check to ensure we don't attempt to use more - * then the available number of pools. 
- */ - if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) - return ERR_PTR(-EINVAL); + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); + if (pool == adapter->num_rx_pools) { + u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; + u16 reserved_pools; + + if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || + adapter->num_rx_pools > IXGBE_MAX_MACVLANS) + return ERR_PTR(-EBUSY); + + /* Hardware has a limited number of available pools. Each VF, + * and the PF require a pool. Check to ensure we don't + * attempt to use more then the available number of pools. + */ + if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) + return ERR_PTR(-EBUSY); - if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || - (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) - return ERR_PTR(-EBUSY); + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED; - fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL); - if (!fwd_adapter) - return ERR_PTR(-ENOMEM); + /* Try to reserve as many queues per pool as possible, + * we start with the configurations that support 4 queues + * per pools, followed by 2, and then by just 1 per pool. + */ + if (used_pools < 32 && adapter->num_rx_pools < 16) + reserved_pools = min_t(u16, + 32 - used_pools, + 16 - adapter->num_rx_pools); + else if (adapter->num_rx_pools < 32) + reserved_pools = min_t(u16, + 64 - used_pools, + 32 - adapter->num_rx_pools); + else + reserved_pools = 64 - used_pools; - pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); - set_bit(pool, adapter->fwd_bitmask); - limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1); - /* Enable VMDq flag so device will be set in VM mode */ - adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; - adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; + if (!reserved_pools) + return ERR_PTR(-EBUSY); - fwd_adapter->pool = pool; + adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools; - /* Force reinit of ring allocation with VMDQ enabled */ - err = ixgbe_setup_tc(pdev, adapter->hw_tcs); + /* Force reinit of ring allocation with VMDQ enabled */ + err = ixgbe_setup_tc(pdev, adapter->hw_tcs); + if (err) + return ERR_PTR(err); - if (!err && netif_running(pdev)) - err = ixgbe_fwd_ring_up(adapter, fwd_adapter); + if (pool >= adapter->num_rx_pools) + return ERR_PTR(-ENOMEM); + } - if (!err) - return fwd_adapter; + accel = kzalloc(sizeof(*accel), GFP_KERNEL); + if (!accel) + return ERR_PTR(-ENOMEM); + + set_bit(pool, adapter->fwd_bitmask); + accel->pool = pool; + accel->netdev = vdev; - /* unwind counter and free adapter struct */ - netdev_info(pdev, - "%s: dfwd hardware acceleration failed\n", vdev->name); - clear_bit(pool, adapter->fwd_bitmask); - kfree(fwd_adapter); - return ERR_PTR(err); + if (!netif_running(pdev)) + return accel; + + err = ixgbe_fwd_ring_up(adapter, accel); + if (err) + return ERR_PTR(err); + + return accel; } static void ixgbe_fwd_del(struct net_device *pdev, void *priv) @@ -9820,7 +9911,7 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) struct ixgbe_fwd_adapter *accel = priv; struct ixgbe_adapter *adapter = netdev_priv(pdev); unsigned int rxbase = accel->rx_base_queue; - unsigned int limit, i; + unsigned int i; /* delete unicast filter associated with offloaded interface */ ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr, @@ -9844,25 +9935,6 @@ static void 
ixgbe_fwd_del(struct net_device *pdev, void *priv) } clear_bit(accel->pool, adapter->fwd_bitmask); - limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools); - adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; - - /* go back to full RSS if we're done with our VMQs */ - if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { - int rss = min_t(int, ixgbe_max_rss_indices(adapter), - num_online_cpus()); - - adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - adapter->ring_feature[RING_F_RSS].limit = rss; - } - - ixgbe_setup_tc(pdev, adapter->hw_tcs); - netdev_dbg(pdev, "pool %i:%i queues %i:%i\n", - accel->pool, adapter->num_rx_pools, - accel->rx_base_queue, - accel->rx_base_queue + - adapter->num_rx_queues_per_pool); kfree(accel); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 008aa073a679..bfc4171cd3f9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -266,7 +266,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) #endif /* Disable VMDq flag so device will be set in VM mode */ - if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { + if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) { adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; rss = min_t(int, ixgbe_max_rss_indices(adapter), @@ -312,7 +312,8 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) * other values out of range. */ num_tc = adapter->hw_tcs; - num_rx_pools = adapter->num_rx_pools; + num_rx_pools = bitmap_weight(adapter->fwd_bitmask, + adapter->num_rx_pools); limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC : (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC; -- cgit v1.2.3 From 83dd693f36509c2f2b6df8dca28b6dadc346f41f Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:50 -0700 Subject: igb: Fix not adding filter elements to the list Because the order of the parameters passes to 'hlist_add_behind()' was inverted, the 'parent' node was added "behind" the 'input', as input is not in the list, this causes the 'input' node to be lost. Fixes: 0e71def25281 ("igb: add support of RX network flow classification") Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index e77ba0d5866d..5975d432836f 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2865,7 +2865,7 @@ static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, /* add filter to the list */ if (parent) - hlist_add_behind(&parent->nfc_node, &input->nfc_node); + hlist_add_behind(&input->nfc_node, &parent->nfc_node); else hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); -- cgit v1.2.3 From 4dc93fcf0b95dc3fda4db917effae31fbb8ad2a8 Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:51 -0700 Subject: igb: Fix queue selection on MAC filters on i210 On the RAH registers there are semantic differences on the meaning of the "queue" parameter for traffic steering depending on the controller model: there is the 82575 meaning, which "queue" means a RX Hardware Queue, and the i350 meaning, where it is a reception pool. 
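As a rough illustration of that difference (macros from e1000_defines.h, queue/pool numbers illustrative):

  /* 82575/i210: the RAH pool field holds a queue *number*  */
  rar_high |= E1000_RAH_POOL_1 * queue;   /* queue 2 -> field value 2     */

  /* other parts (the i350 meaning): the field is a pool *bitmask* */
  rar_high |= E1000_RAH_POOL_1 << queue;  /* pool 2  -> bit 2 of the field */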
The previous behaviour was having no effect for i210 based controllers because the QSEL bit of the RAH register wasn't being set. This patch separates the condition in discrete cases, so the different handling is clearer. Fixes: 83c21335c876 ("igb: improve MAC filter handling") Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index cce7ada89255..9afee130c2aa 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8763,12 +8763,17 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) if (is_valid_ether_addr(addr)) rar_high |= E1000_RAH_AV; - if (hw->mac.type == e1000_82575) + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; - else + break; + default: rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue; + break; + } } wr32(E1000_RAL(index), rar_low); -- cgit v1.2.3 From 6995ddc49405bd4afe2aa7937f38abd2ee706084 Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:52 -0700 Subject: igb: Enable the hardware traffic class feature bit for igb models This will allow functionality depending on the hardware being traffic class aware to work. In particular the tc-flower offloading checks verifies that this bit is set. Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 9afee130c2aa..0facaa73f626 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2822,6 +2822,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac.type >= e1000_82576) netdev->features |= NETIF_F_SCTP_CRC; + if (hw->mac.type >= e1000_i350) + netdev->features |= NETIF_F_HW_TC; + #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ NETIF_F_GSO_IPXIP4 | \ -- cgit v1.2.3 From 1d717cf4115e283aaf6c38174ecb7646f96abab5 Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:53 -0700 Subject: igb: Add support for MAC address filters specifying source addresses Makes it possible to direct packets to queues based on their source address. Documents the expected usage of the 'flags' parameter. Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_defines.h | 1 + drivers/net/ethernet/intel/igb/igb.h | 1 + drivers/net/ethernet/intel/igb/igb_main.c | 40 ++++++++++++++++++++++---- 3 files changed, 37 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 98534f765e0e..5417edbe3125 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -491,6 +491,7 @@ * manageability enabled, allowing us room for 15 multicast addresses. 
*/ #define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAH_ASEL_SRC_ADDR 0x00010000 #define E1000_RAL_MAC_ADDR_LEN 4 #define E1000_RAH_MAC_ADDR_LEN 2 #define E1000_RAH_POOL_MASK 0x03FC0000 diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 8dbc399b345e..1ff58f7a5c7d 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -474,6 +474,7 @@ struct igb_mac_addr { #define IGB_MAC_STATE_DEFAULT 0x1 #define IGB_MAC_STATE_IN_USE 0x2 +#define IGB_MAC_STATE_SRC_ADDR 0x4 /* board specific private data structure */ struct igb_adapter { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 0facaa73f626..902db2096bfd 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6859,8 +6859,14 @@ static void igb_set_default_mac_filter(struct igb_adapter *adapter) igb_rar_set_index(adapter, 0); } -static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, - const u8 queue) +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_add_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) { struct e1000_hw *hw = &adapter->hw; int rar_entries = hw->mac.rar_entry_count - @@ -6880,7 +6886,7 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, ether_addr_copy(adapter->mac_table[i].addr, addr); adapter->mac_table[i].queue = queue; - adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; igb_rar_set_index(adapter, i); return i; @@ -6889,8 +6895,21 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, return -ENOSPC; } -static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, +static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, const u8 queue) +{ + return igb_add_mac_filter_flags(adapter, addr, queue, 0); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGB_MAC_STATE_SRC_ADDR can be used. 
+ */ +static int igb_del_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) { struct e1000_hw *hw = &adapter->hw; int rar_entries = hw->mac.rar_entry_count - @@ -6907,12 +6926,14 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, for (i = 0; i < rar_entries; i++) { if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; if (adapter->mac_table[i].queue != queue) continue; if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) continue; - adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].state = 0; memset(adapter->mac_table[i].addr, 0, ETH_ALEN); adapter->mac_table[i].queue = 0; @@ -6923,6 +6944,12 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, return -ENOENT; } +static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, 0); +} + static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -8766,6 +8793,9 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) if (is_valid_ether_addr(addr)) rar_high |= E1000_RAH_AV; + if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) + rar_high |= E1000_RAH_ASEL_SRC_ADDR; + switch (hw->mac.type) { case e1000_82575: case e1000_i210: -- cgit v1.2.3 From 0a8238998345549011c2a0da43cd6a35ea7bbebe Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:54 -0700 Subject: igb: Add support for enabling queue steering in filters On some igb models (82575 and i210) the MAC address filters can control to which queue the packet will be assigned. This extends the 'state' with one more state to signify that queue selection should be enabled for that filter. As 82575 parts are no longer easily obtained (and this was developed against i210), only support for the i210 model is enabled. These functions are exported and will be used in the next patch. 
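As a sketch of the intended use (the real caller lands in the next patch; the queue number here is illustrative):

  /* steer frames whose destination MAC matches 'addr' to RX queue 2 */
  err = igb_add_mac_steering_filter(adapter, addr, 2, 0);

  /* the same, but matching on the source MAC instead */
  err = igb_add_mac_steering_filter(adapter, addr, 2, IGB_MAC_STATE_SRC_ADDR);

  /* removal mirrors the flags used when the filter was added */
  err = igb_del_mac_steering_filter(adapter, addr, 2, 0);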
Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_defines.h | 1 + drivers/net/ethernet/intel/igb/igb.h | 6 ++++++ drivers/net/ethernet/intel/igb/igb_main.c | 26 ++++++++++++++++++++++++++ 3 files changed, 33 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 5417edbe3125..d3d1d868e7ba 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -492,6 +492,7 @@ */ #define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ #define E1000_RAH_ASEL_SRC_ADDR 0x00010000 +#define E1000_RAH_QSEL_ENABLE 0x10000000 #define E1000_RAL_MAC_ADDR_LEN 4 #define E1000_RAH_MAC_ADDR_LEN 2 #define E1000_RAH_POOL_MASK 0x03FC0000 diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 1ff58f7a5c7d..c71845a450f7 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -475,6 +475,7 @@ struct igb_mac_addr { #define IGB_MAC_STATE_DEFAULT 0x1 #define IGB_MAC_STATE_IN_USE 0x2 #define IGB_MAC_STATE_SRC_ADDR 0x4 +#define IGB_MAC_STATE_QUEUE_STEERING 0x8 /* board specific private data structure */ struct igb_adapter { @@ -740,4 +741,9 @@ int igb_add_filter(struct igb_adapter *adapter, int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input); +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); + #endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 902db2096bfd..21e76d649068 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6950,6 +6950,28 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, return igb_del_mac_filter_flags(adapter, addr, queue, 0); } +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + + /* In theory, this should be supported on 82575 as well, but + * that part wasn't easily accessible during development. + */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + return igb_add_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -8799,6 +8821,10 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) switch (hw->mac.type) { case e1000_82575: case e1000_i210: + if (adapter->mac_table[index].state & + IGB_MAC_STATE_QUEUE_STEERING) + rar_high |= E1000_RAH_QSEL_ENABLE; + rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; break; -- cgit v1.2.3 From 872f923c5b38226a0c6825487742098b1968d463 Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:55 -0700 Subject: igb: Allow filters to be added for the local MAC address Users expect that when adding a steering filter for the local MAC address, that all the traffic directed to that address will go to some queue. 
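For instance (queue number illustrative, aa:bb:cc:dd:ee:ff standing in for eth0's own MAC address):

$ ethtool -N eth0 flow-type ether dst aa:bb:cc:dd:ee:ff action 3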
Currently, it's not possible to configure entries in the "in use" state, which is the normal state of the local MAC address entry (it is the default), this patch allows to override the steering configuration of "in use" entries, if the filter to be added match the address and address type (source or destination) of an existing entry. There is a bit of a special handling for entries referring to the local MAC address, when they are removed, only the steering configuration is reset. Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 40 +++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 21e76d649068..9a5c2dc3cf5a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6859,6 +6859,27 @@ static void igb_set_default_mac_filter(struct igb_adapter *adapter) igb_rar_set_index(adapter, 0); } +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGB_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != + (flags & IGB_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + /* Add a MAC filter for 'addr' directing matching traffic to 'queue', * 'flags' is used to indicate what kind of match is made, match is by * default for the destination address, if matching by source address @@ -6881,7 +6902,8 @@ static int igb_add_mac_filter_flags(struct igb_adapter *adapter, * addresses. */ for (i = 0; i < rar_entries; i++) { - if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) + if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) continue; ether_addr_copy(adapter->mac_table[i].addr, addr); @@ -6933,9 +6955,19 @@ static int igb_del_mac_filter_flags(struct igb_adapter *adapter, if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) continue; - adapter->mac_table[i].state = 0; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); - adapter->mac_table[i].queue = 0; + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].queue = + adapter->vfs_allocated_count; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + } igb_rar_set_index(adapter, i); return 0; -- cgit v1.2.3 From bae51fefe2ac6ec2fe27f9d041aa3be87cae85af Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:56 -0700 Subject: igb: Enable nfc filters to specify MAC addresses This allows igb_add_filter()/igb_erase_filter() to work on filters that include MAC addresses (both source and destination). For now, this only exposes the functionality, the next commit glues ethtool into this. Later in this series, these APIs are used to allow offloading of cls_flower filters. 
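A minimal sketch of what a caller now hands to igb_add_filter() (field and flag names from igb.h in this series; the queue value is illustrative and locking is omitted):

  struct igb_nfc_filter *input = kzalloc(sizeof(*input), GFP_KERNEL);

  ether_addr_copy(input->filter.dst_addr, mac);      /* match on destination MAC   */
  input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR;
  input->action = 2;                                 /* steer matches to RX queue 2 */

  err = igb_add_filter(adapter, input);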
Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 4 ++++ drivers/net/ethernet/intel/igb/igb_ethtool.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index c71845a450f7..0eac1df499f8 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -442,6 +442,8 @@ struct hwmon_buff { enum igb_filter_match_flags { IGB_FILTER_FLAG_ETHER_TYPE = 0x1, IGB_FILTER_FLAG_VLAN_TCI = 0x2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8, }; #define IGB_MAX_RXNFC_FILTERS 16 @@ -456,6 +458,8 @@ struct igb_nfc_input { u8 match_flags; __be16 etype; __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; }; struct igb_nfc_filter { diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 5975d432836f..31b2960a7869 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2776,6 +2776,25 @@ int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) return err; } + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) err = igb_rxnfc_write_vlan_prio_filter(adapter, input); @@ -2824,6 +2843,15 @@ int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) igb_clear_vlan_prio_filter(adapter, ntohs(input->filter.vlan_tci)); + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + return 0; } -- cgit v1.2.3 From b4a38d4276e1c3ca46141d6424a8da86bc5b74b0 Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:57 -0700 Subject: igb: Add MAC address support for ethtool nftuple filters This adds the capability of configuring the queue steering of arriving packets based on their source and destination MAC addresses. Source address steering (i.e. driving traffic to a specific queue), for the i210, does not work, but filtering does (i.e. accepting traffic based on the source address). So, trying to add a filter specifying only a source address will be an error. 
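Concretely, a command such as

$ ethtool -N eth0 flow-type ether src 44:44:44:44:44:44 action 3

with no other match key is rejected with EOPNOTSUPP on i210, while combining the source address with another key (as in the second example below) is accepted.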
In practical terms this adds support for the following use cases, characterized by these examples: $ ethtool -N eth0 flow-type ether dst aa:aa:aa:aa:aa:aa action 0 (this will direct packets with destination address "aa:aa:aa:aa:aa:aa" to the RX queue 0) $ ethtool -N eth0 flow-type ether src 44:44:44:44:44:44 \ proto 0x22f0 action 3 (this will direct packets with source address "44:44:44:44:44:44" and ethertype 0x22f0 to the RX queue 3) Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_ethtool.c | 43 +++++++++++++++++++++++++--- 1 file changed, 39 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 31b2960a7869..6697c273ab59 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2495,6 +2495,23 @@ static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, fsp->h_ext.vlan_tci = rule->filter.vlan_tci; fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); } + if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + return 0; } return -EINVAL; @@ -2768,8 +2785,16 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) { + struct e1000_hw *hw = &adapter->hw; int err = -EINVAL; + if (hw->mac.type == e1000_i210 && + !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i210 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { err = igb_rxnfc_write_etype_filter(adapter, input); if (err) @@ -2933,10 +2958,6 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) return -EINVAL; - if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK && - fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) - return -EINVAL; - input = kzalloc(sizeof(*input), GFP_KERNEL); if (!input) return -ENOMEM; @@ -2946,6 +2967,20 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; } + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { err = -EINVAL; -- cgit v1.2.3 From f8f3d34e5470c48a10c27c4f6f65e149e10d04ec Mon Sep 17 00:00:00 2001 From: 
Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:58 -0700 Subject: igb: Add the skeletons for tc-flower offloading This adds basic functions needed to implement offloading for filters created by tc-flower. Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 66 +++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 9a5c2dc3cf5a..26b0f0efc97e 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -2513,6 +2514,69 @@ static int igb_offload_cbs(struct igb_adapter *adapter, return 0; } +static int igb_configure_clsflower(struct igb_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + return -EOPNOTSUPP; +} + +static int igb_delete_clsflower(struct igb_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + return -EOPNOTSUPP; +} + +static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return igb_configure_clsflower(adapter, cls_flower); + case TC_CLSFLOWER_DESTROY: + return igb_delete_clsflower(adapter, cls_flower); + case TC_CLSFLOWER_STATS: + return -EOPNOTSUPP; + default: + return -EINVAL; + } +} + +static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct igb_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return igb_setup_tc_cls_flower(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_setup_tc_block(struct igb_adapter *adapter, + struct tc_block_offload *f) +{ + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, + adapter, adapter); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, + adapter); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -2521,6 +2585,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_QDISC_CBS: return igb_offload_cbs(adapter, type_data); + case TC_SETUP_BLOCK: + return igb_setup_tc_block(adapter, type_data); default: return -EOPNOTSUPP; -- cgit v1.2.3 From 3e3e9fd8b6f0dd2d387d0dc666b770fe0dc36b33 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 24 Apr 2018 21:17:01 -0700 Subject: nfp: reset local locks on init NFP locks record the owner when held, for PCIe devices the owner ID will be the PCIe link number. When driver loads it should scan known locks and if they indicate that they are held by local endpoint but the driver doesn't hold them - release them. Locks can be left taken for instance when kernel gets kexec-ed or after a crash. Management FW tries to clean up stale locks too, but it currently depends on PCIe link going down which doesn't always happen. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 5 ++ drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 2 + .../net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 2 + .../net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 45 +++++++++++++++++ .../ethernet/netronome/nfp/nfpcore/nfp_resource.c | 59 ++++++++++++++++++++++ 5 files changed, 113 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index c4b1f344b4da..0ade122805ad 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -486,6 +486,10 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_disable_msix; } + err = nfp_resource_table_init(pf->cpp); + if (err) + goto err_cpp_free; + pf->hwinfo = nfp_hwinfo_read(pf->cpp); dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n", @@ -548,6 +552,7 @@ err_fw_unload: vfree(pf->dumpspec); err_hwinfo_free: kfree(pf->hwinfo); +err_cpp_free: nfp_cpp_free(pf->cpp); err_disable_msix: destroy_workqueue(pf->wq); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index ced62d112aa2..f44d0a857314 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -94,6 +94,8 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, /* MAC Statistics Accumulator */ #define NFP_RESOURCE_MAC_STATISTICS "mac.stat" +int nfp_resource_table_init(struct nfp_cpp *cpp); + struct nfp_resource * nfp_resource_acquire(struct nfp_cpp *cpp, const char *name); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index c8f2c064cce3..4e19add1c539 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -295,6 +295,8 @@ void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, + unsigned long long address); /** * nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index cb28ac03e4ca..c88bf673cb76 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -59,6 +59,11 @@ static u32 nfp_mutex_unlocked(u16 interface) return (u32)interface << 16 | 0x0000; } +static u32 nfp_mutex_owner(u32 val) +{ + return val >> 16; +} + static bool nfp_mutex_is_locked(u32 val) { return (val & 0xffff) == 0x000f; @@ -351,3 +356,43 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL; } + +/** + * nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * + * Release lock if held by local system. Extreme care is advised, call only + * when no local lock users can exist. 
+ * + * Return: 0 if the lock was OK, 1 if locked by us, -errno on invalid mutex + */ +int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, + unsigned long long address) +{ + const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + u16 interface = nfp_cpp_interface(cpp); + int err; + u32 tmp; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return err; + + /* Check lock */ + err = nfp_cpp_readl(cpp, mur, address, &tmp); + if (err < 0) + return err; + + if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) + return 0; + + /* Bust the lock */ + err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface)); + if (err < 0) + return err; + + return 1; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index 7e14725055c7..2dd89dba9311 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -338,3 +338,62 @@ u64 nfp_resource_size(struct nfp_resource *res) { return res->size; } + +/** + * nfp_resource_table_init() - Run initial checks on the resource table + * @cpp: NFP CPP handle + * + * Start-of-day init procedure for resource table. Must be called before + * any local resource table users may exist. + * + * Return: 0 on success, -errno on failure + */ +int nfp_resource_table_init(struct nfp_cpp *cpp) +{ + struct nfp_cpp_mutex *dev_mutex; + int i, err; + + err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE); + if (err < 0) { + nfp_err(cpp, "Error: failed to reclaim resource table mutex\n"); + return err; + } + if (err) + nfp_warn(cpp, "Warning: busted main resource table mutex\n"); + + dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE, + NFP_RESOURCE_TBL_KEY); + if (!dev_mutex) + return -ENOMEM; + + if (nfp_cpp_mutex_lock(dev_mutex)) { + nfp_err(cpp, "Error: failed to claim resource table mutex\n"); + nfp_cpp_mutex_free(dev_mutex); + return -EINVAL; + } + + /* Resource 0 is the dev_mutex, start from 1 */ + for (i = 1; i < NFP_RESOURCE_TBL_ENTRIES; i++) { + u64 addr = NFP_RESOURCE_TBL_BASE + + sizeof(struct nfp_resource_entry) * i; + + err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, addr); + if (err < 0) { + nfp_err(cpp, + "Error: failed to reclaim resource %d mutex\n", + i); + goto err_unlock; + } + if (err) + nfp_warn(cpp, "Warning: busted resource %d mutex\n", i); + } + + err = 0; +err_unlock: + nfp_cpp_mutex_unlock(dev_mutex); + nfp_cpp_mutex_free(dev_mutex); + + return err; +} -- cgit v1.2.3 From dd92a7d1afef6229812a8980e89a22cfc9ddca94 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 24 Apr 2018 21:17:02 -0700 Subject: nfp: print PCIe link bandwidth on probe To aid debugging of performance issues caused by limited PCIe bandwidth print the PCIe link information on probe. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index cd678323bacb..a0e336bd1d85 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -1330,6 +1330,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev) /* Finished with card initialization. */ dev_info(&pdev->dev, "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n"); + pcie_print_link_status(pdev); nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); if (!nfp) { -- cgit v1.2.3 From 54a4a03439305f701834cef1e4bd108548388d83 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Tue, 24 Apr 2018 21:17:03 -0700 Subject: nfp: flower: support offloading multiple rules with same cookie When multiple netdevs are attached to a tc offload block and register for callbacks, a rule added to the block will be propogated to all netdevs. Previously these were detected as duplicates (based on cookie) and rejected. Modify the rule nfp lookup function to optionally include an ingress netdev and a host context along with the cookie value when searching for a rule. When a new rule is passed to the driver, the netdev the rule is to be attached to is considered when searching for dublicates. When a stats update is received from HW, the host context is used alongside the cookie to map to the correct host rule. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 8 +++++-- .../net/ethernet/netronome/nfp/flower/metadata.c | 20 +++++++++------- .../net/ethernet/netronome/nfp/flower/offload.c | 27 ++++++++++++++++------ 3 files changed, 38 insertions(+), 17 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index c67e1b54c614..9e6804bc9b40 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -47,6 +47,7 @@ struct net_device; struct nfp_app; +#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff) #define NFP_FL_STATS_ENTRY_RS BIT(20) #define NFP_FL_STATS_ELEM_RS 4 #define NFP_FL_REPEATED_HASH_MAX BIT(17) @@ -189,6 +190,7 @@ struct nfp_fl_payload { spinlock_t lock; /* lock stats */ struct nfp_fl_stats stats; __be32 nfp_tun_ipv4_addr; + struct net_device *ingress_dev; char *unmasked_data; char *mask_data; char *action_data; @@ -216,12 +218,14 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_flow); int nfp_compile_flow_metadata(struct nfp_app *app, struct tc_cls_flower_offload *flow, - struct nfp_fl_payload *nfp_flow); + struct nfp_fl_payload *nfp_flow, + struct net_device *netdev); int nfp_modify_flow_metadata(struct nfp_app *app, struct nfp_fl_payload *nfp_flow); struct nfp_fl_payload * -nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie); +nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, + struct net_device *netdev, __be32 host_ctx); struct nfp_fl_payload * nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie); diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index db977cf8e933..21668aa435e8 100644 --- 
a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -99,14 +99,18 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) /* Must be called with either RTNL or rcu_read_lock */ struct nfp_fl_payload * -nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie) +nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, + struct net_device *netdev, __be32 host_ctx) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flower_entry; hash_for_each_possible_rcu(priv->flow_table, flower_entry, link, tc_flower_cookie) - if (flower_entry->tc_flower_cookie == tc_flower_cookie) + if (flower_entry->tc_flower_cookie == tc_flower_cookie && + (!netdev || flower_entry->ingress_dev == netdev) && + (host_ctx == NFP_FL_STATS_CTX_DONT_CARE || + flower_entry->meta.host_ctx_id == host_ctx)) return flower_entry; return NULL; @@ -121,13 +125,11 @@ nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats) flower_cookie = be64_to_cpu(stats->stats_cookie); rcu_read_lock(); - nfp_flow = nfp_flower_search_fl_table(app, flower_cookie); + nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL, + stats->stats_con_id); if (!nfp_flow) goto exit_rcu_unlock; - if (nfp_flow->meta.host_ctx_id != stats->stats_con_id) - goto exit_rcu_unlock; - spin_lock(&nfp_flow->lock); nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count); nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count); @@ -317,7 +319,8 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, int nfp_compile_flow_metadata(struct nfp_app *app, struct tc_cls_flower_offload *flow, - struct nfp_fl_payload *nfp_flow) + struct nfp_fl_payload *nfp_flow, + struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *check_entry; @@ -348,7 +351,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app, nfp_flow->stats.bytes = 0; nfp_flow->stats.used = jiffies; - check_entry = nfp_flower_search_fl_table(app, flow->cookie); + check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev, + NFP_FL_STATS_CTX_DONT_CARE); if (check_entry) { if (nfp_release_stats_entry(app, stats_cxt)) return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 114d2ab02a38..bdc82e11a31e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -419,6 +419,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_key_ls; } + flow_pay->ingress_dev = egress ? NULL : netdev; + err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay, tun_type); if (err) @@ -428,7 +430,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - err = nfp_compile_flow_metadata(app, flow, flow_pay); + err = nfp_compile_flow_metadata(app, flow, flow_pay, + flow_pay->ingress_dev); if (err) goto err_destroy_flow; @@ -462,6 +465,7 @@ err_free_key_ls: * @app: Pointer to the APP handle * @netdev: netdev structure. * @flow: TC flower classifier offload structure + * @egress: Netdev is the egress dev. * * Removes a flow from the repeated hash structure and clears the * action payload. 
@@ -470,13 +474,16 @@ err_free_key_ls: */ static int nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow) + struct tc_cls_flower_offload *flow, bool egress) { struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_fl_payload *nfp_flow; + struct net_device *ingr_dev; int err; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie); + ingr_dev = egress ? NULL : netdev; + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, + NFP_FL_STATS_CTX_DONT_CARE); if (!nfp_flow) return -ENOENT; @@ -505,7 +512,9 @@ err_free_flow: /** * nfp_flower_get_stats() - Populates flow stats obtained from hardware. * @app: Pointer to the APP handle + * @netdev: Netdev structure. * @flow: TC flower classifier offload structure + * @egress: Netdev is the egress dev. * * Populates a flow statistics structure which which corresponds to a * specific flow. @@ -513,11 +522,15 @@ err_free_flow: * Return: negative value on error, 0 if stats populated successfully. */ static int -nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow) +nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, + struct tc_cls_flower_offload *flow, bool egress) { struct nfp_fl_payload *nfp_flow; + struct net_device *ingr_dev; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie); + ingr_dev = egress ? NULL : netdev; + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, + NFP_FL_STATS_CTX_DONT_CARE); if (!nfp_flow) return -EINVAL; @@ -543,9 +556,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, case TC_CLSFLOWER_REPLACE: return nfp_flower_add_offload(app, netdev, flower, egress); case TC_CLSFLOWER_DESTROY: - return nfp_flower_del_offload(app, netdev, flower); + return nfp_flower_del_offload(app, netdev, flower, egress); case TC_CLSFLOWER_STATS: - return nfp_flower_get_stats(app, flower); + return nfp_flower_get_stats(app, netdev, flower, egress); } return -EOPNOTSUPP; -- cgit v1.2.3 From c50647d3e8fd4149ecd78e9a234336e727f48107 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Tue, 24 Apr 2018 21:17:04 -0700 Subject: nfp: flower: ignore duplicate cb requests for same rule If a flower rule has a repr both as ingress and egress port then 2 callbacks may be generated for the same rule request. Add an indicator to each flow as to whether or not it was added from an ingress registered cb. If so then ignore add/del/stat requests to it from an egress cb. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 1 + .../net/ethernet/netronome/nfp/flower/offload.c | 23 +++++++++++++++++++--- 2 files changed, 21 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 9e6804bc9b40..733ff53cc601 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -194,6 +194,7 @@ struct nfp_fl_payload { char *unmasked_data; char *mask_data; char *action_data; + bool ingress_offload; }; struct nfp_fl_stats_frame { diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index bdc82e11a31e..70ec9d821b91 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -345,7 +345,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } static struct nfp_fl_payload * -nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) +nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) { struct nfp_fl_payload *flow_pay; @@ -371,6 +371,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) flow_pay->meta.flags = 0; spin_lock_init(&flow_pay->lock); + flow_pay->ingress_offload = !egress; + return flow_pay; err_free_mask: @@ -402,8 +404,20 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flow_pay; struct nfp_fl_key_ls *key_layer; + struct net_device *ingr_dev; int err; + ingr_dev = egress ? NULL : netdev; + flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, + NFP_FL_STATS_CTX_DONT_CARE); + if (flow_pay) { + /* Ignore as duplicate if it has been added by different cb. */ + if (flow_pay->ingress_offload && egress) + return 0; + else + return -EOPNOTSUPP; + } + key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL); if (!key_layer) return -ENOMEM; @@ -413,7 +427,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_free_key_ls; - flow_pay = nfp_flower_allocate_new(key_layer); + flow_pay = nfp_flower_allocate_new(key_layer, egress); if (!flow_pay) { err = -ENOMEM; goto err_free_key_ls; @@ -485,7 +499,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, NFP_FL_STATS_CTX_DONT_CARE); if (!nfp_flow) - return -ENOENT; + return egress ? 0 : -ENOENT; err = nfp_modify_flow_metadata(app, nfp_flow); if (err) @@ -534,6 +548,9 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, if (!nfp_flow) return -EINVAL; + if (nfp_flow->ingress_offload && egress) + return 0; + spin_lock_bh(&nfp_flow->lock); tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes, nfp_flow->stats.pkts, nfp_flow->stats.used); -- cgit v1.2.3 From e086be9ab21cc2a0894e0f72ff8c974aba0b296e Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 10 Apr 2018 10:49:59 -0700 Subject: igb: Add support for adding offloaded clsflower filters This allows filters added by tc-flower and specifying MAC addresses, Ethernet types, and the VLAN priority field, to be offloaded to the controller. This reuses most of the infrastructure used by ethtool, but clsflower filters are kept in a separated list, so they are invisible to ethtool. 
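A minimal sketch of that separation (fields introduced in this series; match parsing, locking and the hardware programming are omitted):

  struct igb_nfc_filter *filter = kzalloc(sizeof(*filter), GFP_KERNEL);

  /* igb_parse_cls_flower() translates the tc match into filter->filter */
  filter->cookie = cls_flower->cookie;  /* lets DESTROY/STATS find the rule again */

  /* kept on its own list, so the ethtool nfc code never iterates it */
  hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);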
To setup clsflower offloading: $ tc qdisc replace dev eth0 handle 100: parent root mqprio \ num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \ queues 1@0 1@1 2@2 hw 0 (clsflower offloading depends on the netword driver to be configured with multiple traffic classes, we use mqprio's 'num_tc' parameter to set it to 3) $ tc qdisc add dev eth0 ingress Examples of filters: $ tc filter add dev eth0 parent ffff: flower \ dst_mac aa:aa:aa:aa:aa:aa \ hw_tc 2 skip_sw (just a simple filter filtering for the destination MAC address and steering that traffic to queue 2) $ tc filter add dev enp2s0 parent ffff: proto 0x22f0 flower \ src_mac cc:cc:cc:cc:cc:cc \ hw_tc 1 skip_sw (as the i210 doesn't support steering traffic based on the source address alone, we need to use another steering traffic, in this case we are using the ethernet type (0x22f0) to steer traffic to queue 1) Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 2 + drivers/net/ethernet/intel/igb/igb_main.c | 188 +++++++++++++++++++++++++++++- 2 files changed, 188 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 0eac1df499f8..990a7fb32e4e 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -465,6 +465,7 @@ struct igb_nfc_input { struct igb_nfc_filter { struct hlist_node nfc_node; struct igb_nfc_input filter; + unsigned long cookie; u16 etype_reg_index; u16 sw_idx; u16 action; @@ -604,6 +605,7 @@ struct igb_adapter { /* RX network flow classification support */ struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; unsigned int nfc_filter_count; /* lock for RX network flow classification filter */ spinlock_t nfc_lock; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 26b0f0efc97e..3ce43207a35f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2514,16 +2514,197 @@ static int igb_offload_cbs(struct igb_adapter *adapter, return 0; } +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define VLAN_PRIO_FULL_MASK (0x07) + +static int igb_parse_cls_flower(struct igb_adapter *adapter, + struct tc_cls_flower_offload *f, + int traffic_class, + struct igb_nfc_filter *input) +{ + struct netlink_ext_ack *extack = f->common.extack; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + + if (!is_zero_ether_addr(mask->dst)) { + if (!is_broadcast_ether_addr(mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, key->dst); + } + + if (!is_zero_ether_addr(mask->src)) { + if (!is_broadcast_ether_addr(mask->src)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for 
source MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, key->src); + } + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + + if (mask->n_proto) { + if (mask->n_proto != ETHER_TYPE_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; + input->filter.etype = key->n_proto; + } + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->mask); + + if (mask->vlan_priority) { + if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + input->filter.vlan_tci = key->vlan_priority; + } + } + + input->action = traffic_class; + input->cookie = f->cookie; + + return 0; +} + static int igb_configure_clsflower(struct igb_adapter *adapter, struct tc_cls_flower_offload *cls_flower) { - return -EOPNOTSUPP; + struct netlink_ext_ack *extack = cls_flower->common.extack; + struct igb_nfc_filter *filter, *f; + int err, tc; + + tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); + if (tc < 0) { + NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class"); + return -EINVAL; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + err = igb_parse_cls_flower(adapter, cls_flower, tc, filter); + if (err < 0) + goto err_parse; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in ethtool"); + goto err_locked; + } + } + + hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in cls_flower"); + goto err_locked; + } + } + + err = igb_add_filter(adapter, filter); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter"); + goto err_locked; + } + + hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); + + spin_unlock(&adapter->nfc_lock); + + return 0; + +err_locked: + spin_unlock(&adapter->nfc_lock); + +err_parse: + kfree(filter); + + return err; } static int igb_delete_clsflower(struct igb_adapter *adapter, struct tc_cls_flower_offload *cls_flower) { - return -EOPNOTSUPP; + struct igb_nfc_filter *filter; + int err; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) + if (filter->cookie == cls_flower->cookie) + break; + + if (!filter) { + err = -ENOENT; + goto out; + } + + err = igb_erase_filter(adapter, filter); + if (err < 0) + goto out; + + hlist_del(&filter->nfc_node); + kfree(filter); + +out: + spin_unlock(&adapter->nfc_lock); + + return err; } static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, @@ -9368,6 
+9549,9 @@ static void igb_nfc_filter_exit(struct igb_adapter *adapter) hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) igb_erase_filter(adapter, rule); + hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) + igb_erase_filter(adapter, rule); + spin_unlock(&adapter->nfc_lock); } -- cgit v1.2.3 From 4d7b44833e9a129d3220152727a19cceea3a2ac4 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 25 Apr 2018 11:31:13 +0100 Subject: net: amd8111e: remove redundant duplicated if statement There are two identical nested if statements, the second is redundant and can be removed. Also clean up white space formatting. Cleans up cppcheck warning: drivers/net/ethernet/amd/amd8111e.c:1080: (warning) Identical inner 'if' condition is always true. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/amd8111e.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index c99e3e845ac0..a90080f12e67 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1074,16 +1074,12 @@ static int amd8111e_calc_coalesce(struct net_device *dev) amd8111e_set_coalesce(dev,TX_INTR_COAL); coal_conf->tx_coal_type = MEDIUM_COALESCE; } - - } - else if(tx_pkt_size >= 1024){ - if (tx_pkt_size >= 1024){ - if(coal_conf->tx_coal_type != HIGH_COALESCE){ - coal_conf->tx_timeout = 4; - coal_conf->tx_event_count = 8; - amd8111e_set_coalesce(dev,TX_INTR_COAL); - coal_conf->tx_coal_type = HIGH_COALESCE; - } + } else if (tx_pkt_size >= 1024) { + if (coal_conf->tx_coal_type != HIGH_COALESCE) { + coal_conf->tx_timeout = 4; + coal_conf->tx_event_count = 8; + amd8111e_set_coalesce(dev, TX_INTR_COAL); + coal_conf->tx_coal_type = HIGH_COALESCE; } } } -- cgit v1.2.3 From 6f5d295909c9e10a715c4f9f6301ffa701398112 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 25 Apr 2018 11:43:07 +0100 Subject: mkiss: remove redundant check for len > 0 The check for len > 0 is always true and hence is redundant as this check is already being made to execute the code inside the while-loop. Hence it is redundant and can be removed. Cleans up cppcheck warning: drivers/net/hamradio/mkiss.c:220: (warning) Identical inner 'if' condition is always true. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/hamradio/mkiss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index c180b480f8ef..13e4c1eff353 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -217,7 +217,7 @@ static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc, c = *s++; else if (len > 1) c = crc >> 8; - else if (len > 0) + else c = crc & 0xff; len--; -- cgit v1.2.3 From a8cbb46f831d2c101feccdd0e0daf3627b8c1dca Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 22 Oct 2017 15:58:26 +0300 Subject: iwlwifi: allow different csr flags for different device families Different device families may have different flag values for passing a message to the fw (i.e. SW_RESET). In order to keep the code readable, and avoid conditioning upon the family, store a value for each flag, which indicates the bit that needs to be enabled. 
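As an illustration of the approach, here is a self-contained sketch with hypothetical names and only two of the flags (not the iwlwifi structures themselves; the bit values match the iwl_csr_v1/iwl_csr_v2 tables in the diff below). Bit positions are stored per family and the masks are built with BIT() at the call site:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* One table of bit positions per hardware family (positions, not masks). */
struct demo_csr_params {
	uint8_t flag_init_done;
	uint8_t flag_mac_clock_ready;
};

static const struct demo_csr_params demo_csr_v1 = {
	.flag_init_done       = 2,
	.flag_mac_clock_ready = 0,
};

static const struct demo_csr_params demo_csr_v2 = {
	.flag_init_done       = 6,
	.flag_mac_clock_ready = 20,
};

int main(void)
{
	/* Selected once per device, e.g. from its config entry. */
	const struct demo_csr_params *csr = &demo_csr_v2;

	/* Callers build the mask where it is used instead of relying on a
	 * family-specific #define. */
	printf("INIT_DONE mask:   0x%08x\n", BIT(csr->flag_init_done));
	printf("CLOCK_READY mask: 0x%08x\n", BIT(csr->flag_mac_clock_ready));
	return 0;
}

The benefit is that supporting a new family only means adding one more table entry instead of conditioning every register access on the device family.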
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/cfg/1000.c | 8 ++- drivers/net/wireless/intel/iwlwifi/cfg/2000.c | 13 +++-- drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 7 +++ drivers/net/wireless/intel/iwlwifi/cfg/5000.c | 8 ++- drivers/net/wireless/intel/iwlwifi/cfg/6000.c | 19 +++++-- drivers/net/wireless/intel/iwlwifi/cfg/7000.c | 5 +- drivers/net/wireless/intel/iwlwifi/cfg/8000.c | 5 +- drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 3 +- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 65 ++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 28 +--------- .../net/wireless/intel/iwlwifi/iwl-eeprom-read.c | 8 ++- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 8 ++- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 3 +- .../net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 15 +++-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 65 ++++++++++++---------- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 16 +++--- 16 files changed, 184 insertions(+), 92 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c index b2573b1d1506..591687984962 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -27,7 +28,6 @@ #include #include #include "iwl-config.h" -#include "iwl-csr.h" #include "iwl-agn-hw.h" /* Highest firmware API version supported */ @@ -91,7 +91,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl1000_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", @@ -117,7 +118,8 @@ const struct iwl_cfg iwl1000_bg_cfg = { .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl100_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c index 1b32ad413b9e..a63ca8820568 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -115,7 +116,8 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl2000_2bgn_cfg = { @@ -142,7 +144,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { .base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl2030_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", @@ -163,7 +166,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl105_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", @@ -190,7 +194,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl135_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index f2b6e0e8d787..afc78a1e1cfc 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -143,6 +143,7 @@ const struct iwl_cfg iwl22000_2ac_cfg_hr = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -153,6 +154,7 @@ const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_CDB_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -164,6 +166,7 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_JF_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -174,6 +177,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -184,6 +188,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_F0_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -194,6 +199,7 
@@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_JF_B0_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -204,6 +210,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_A0_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index 4aa8f0a05c8a..a224f1be1ec2 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -28,7 +29,6 @@ #include #include "iwl-config.h" #include "iwl-agn-hw.h" -#include "iwl-csr.h" /* Highest firmware API version supported */ #define IWL5000_UCODE_API_MAX 5 @@ -89,7 +89,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl5300_agn_cfg = { .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", @@ -153,7 +154,8 @@ const struct iwl_cfg iwl5350_agn_cfg = { .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl5150_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index 39335b7b0c16..51cec0bb75fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -135,7 +136,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6005_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", @@ -189,7 +191,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6030_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", @@ -225,7 +228,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6035_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", @@ -280,7 +284,8 @@ const struct iwl_cfg iwl130_bg_cfg = { .base_params = &iwl6000_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6000i_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", @@ -313,7 +318,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = { .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6050_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", @@ -339,7 +345,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = { .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6150_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index ce741beec1fc..8e9684e8e603 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -167,7 +169,8 @@ static const struct iwl_ht_params iwl7000_ht_params = { .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \ .non_shared_ant = ANT_A, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .dccm_offset = IWL7000_DCCM_OFFSET + .dccm_offset = IWL7000_DCCM_OFFSET, \ + .csr = &iwl_csr_v1 #define IWL_DEVICE_7000 \ IWL_DEVICE_7000_COMMON, \ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index 3f4d9bac9f73..e8299b2ab6bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -8,6 +8,7 @@ * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +35,7 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -158,7 +160,8 @@ static const struct iwl_tt_params iwl8000_tt_params = { .apmg_not_supported = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ - .min_umac_error_event_table = 0x800000 + .min_umac_error_event_table = 0x800000, \ + .csr = &iwl_csr_v1 #define IWL_DEVICE_8000 \ IWL_DEVICE_8000_COMMON, \ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index e1c869a1f8cc..fe376fce3444 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -156,7 +156,8 @@ static const struct iwl_tt_params iwl9000_tt_params = { .rf_id = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ - .min_umac_error_event_table = 0x800000 + .min_umac_error_event_table = 0x800000, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl9160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9160", diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 14e69e7a2287..7f0b7cea8b9b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -69,6 +71,7 @@ #include #include #include +#include "iwl-csr.h" enum iwl_device_family { IWL_DEVICE_FAMILY_UNDEFINED, @@ -284,6 +287,44 @@ struct iwl_pwr_tx_backoff { u32 backoff; }; +/** + * struct iwl_csr_params + * + * @flag_sw_reset: reset the device + * @flag_mac_clock_ready: + * Indicates MAC (ucode processor, etc.) is powered up and can run. + * Internal resources are accessible. + * NOTE: This does not indicate that the processor is actually running. 
+ * NOTE: This does not indicate that device has completed + * init or post-power-down restore of internal SRAM memory. + * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that + * SRAM is restored and uCode is in normal operation mode. + * This note is relevant only for pre 5xxx devices. + * NOTE: After device reset, this bit remains "0" until host sets + * INIT_DONE + * @flag_init_done: Host sets this to put device into fully operational + * D0 power mode. Host resets this after SW_RESET to put device into + * low power mode. + * @flag_mac_access_req: Host sets this to request and maintain MAC wakeup, + * to allow host access to device-internal resources. Host must wait for + * mac_clock_ready (and !GOING_TO_SLEEP) before accessing non-CSR device + * registers. + * @flag_val_mac_access_en: mac access is enabled + * @flag_master_dis: disable master + * @flag_stop_master: stop master + * @addr_sw_reset: address for resetting the device + */ +struct iwl_csr_params { + u8 flag_sw_reset; + u8 flag_mac_clock_ready; + u8 flag_init_done; + u8 flag_mac_access_req; + u8 flag_val_mac_access_en; + u8 flag_master_dis; + u8 flag_stop_master; + u8 addr_sw_reset; +}; + /** * struct iwl_cfg * @name: Official name of the device @@ -316,6 +357,7 @@ struct iwl_pwr_tx_backoff { * @mac_addr_from_csr: read HW address from CSR registers * @features: hw features, any combination of feature_whitelist * @pwr_tx_backoffs: translation table between power limits and backoffs + * @csr: csr flags and addresses that are different across devices * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the @@ -354,6 +396,7 @@ struct iwl_cfg { const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; const char *default_nvm_file_C_step; const struct iwl_tt_params *thermal_params; + const struct iwl_csr_params *csr; enum iwl_device_family device_family; enum iwl_led_mode led_mode; enum iwl_nvm_type nvm_type; @@ -400,6 +443,28 @@ struct iwl_cfg { u32 extra_phy_cfg_flags; }; +static const struct iwl_csr_params iwl_csr_v1 = { + .flag_mac_clock_ready = 0, + .flag_val_mac_access_en = 0, + .flag_init_done = 2, + .flag_mac_access_req = 3, + .flag_sw_reset = 7, + .flag_master_dis = 8, + .flag_stop_master = 9, + .addr_sw_reset = (CSR_BASE + 0x020) +}; + +static const struct iwl_csr_params iwl_csr_v2 = { + .flag_init_done = 6, + .flag_mac_clock_ready = 20, + .flag_val_mac_access_en = 20, + .flag_mac_access_req = 21, + .flag_master_dis = 28, + .flag_stop_master = 29, + .flag_sw_reset = 31, + .addr_sw_reset = (CSR_BASE + 0x024) +}; + /* * This list declares the config structures for all devices. */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 4f0d070eda54..ba971d3946e2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -8,6 +8,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +35,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -257,7 +259,6 @@ /* RESET */ #define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) #define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) -#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) #define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) #define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) #define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) @@ -280,35 +281,10 @@ * 4: GOING_TO_SLEEP * Indicates MAC is entering a power-saving sleep power-down. * Not a good time to access device-internal resources. - * 3: MAC_ACCESS_REQ - * Host sets this to request and maintain MAC wakeup, to allow host - * access to device-internal resources. Host must wait for - * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR - * device registers. - * 2: INIT_DONE - * Host sets this to put device into fully operational D0 power mode. - * Host resets this after SW_RESET to put device into low power mode. - * 0: MAC_CLOCK_READY - * Indicates MAC (ucode processor, etc.) is powered up and can run. - * Internal resources are accessible. - * NOTE: This does not indicate that the processor is actually running. - * NOTE: This does not indicate that device has completed - * init or post-power-down restore of internal SRAM memory. - * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that - * SRAM is restored and uCode is in normal operation mode. - * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and - * do not need to save/restore it. - * NOTE: After device reset, this bit remains "0" until host sets - * INIT_DONE */ -#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) -#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) -#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400) -#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) - #define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) #define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000) #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c index f2cea1c7befc..ac965c34a2f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -199,12 +201,12 @@ static int iwl_init_otp_access(struct iwl_trans *trans) /* Enable 40MHz radio clock */ iwl_write32(trans, CSR_GP_CNTRL, iwl_read32(trans, CSR_GP_CNTRL) | - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); /* wait for clock to be ready */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (ret < 0) { IWL_ERR(trans, "Time out access OTP\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 0497c7a44def..b002a7afb5f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -520,15 +522,15 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) /* set INIT_DONE flag */ iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); /* and wait for clock stabilization */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) udelay(2); err = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (err < 0) { IWL_DEBUG_INFO(trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index f25ce3a1ea50..f772d70a65e4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -3,6 +3,7 @@ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
@@ -201,7 +202,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", reg); iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); rxq->need_update = true; return; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index cb4012541f45..b8e8dac2895d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -92,7 +94,8 @@ static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. */ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); /* * Wait for clock stabilization; once stabilized, access to @@ -100,8 +103,9 @@ static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) * and accesses to uCode SRAM. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), + 25000); if (ret < 0) { IWL_DEBUG_INFO(trans, "Failed to init the card\n"); return ret; @@ -143,7 +147,8 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ - iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_clear_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); } void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) @@ -187,7 +192,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f8a0234d332c..623476603cd4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -8,6 +8,7 @@ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -179,7 +181,8 @@ out: static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, + BIT(trans->cfg->csr->flag_sw_reset)); usleep_range(5000, 6000); } @@ -372,7 +375,8 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. */ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); /* * Wait for clock stabilization; once stabilized, access to @@ -380,8 +384,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) * and accesses to uCode SRAM. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), + 25000); if (ret < 0) { IWL_ERR(trans, "Failed to init the card\n"); return ret; @@ -459,15 +464,16 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. */ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); /* * Wait for clock stabilization; once stabilized, access to * device-internal resources is possible. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (WARN_ON(ret < 0)) { IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n"); @@ -519,7 +525,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); /* Activates XTAL resources monitor */ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, @@ -541,11 +547,12 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans) int ret; /* stop device's busmaster DMA activity */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, + BIT(trans->cfg->csr->flag_stop_master)); - ret = iwl_poll_bit(trans, CSR_RESET, - CSR_RESET_REG_FLAG_MASTER_DISABLED, - CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset, + BIT(trans->cfg->csr->flag_master_dis), + BIT(trans->cfg->csr->flag_master_dis), 100); if (ret < 0) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); @@ -594,7 +601,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
*/ iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); } static int iwl_pcie_nic_init(struct iwl_trans *trans) @@ -1267,7 +1274,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); /* Stop the device, and put it in low power state */ iwl_pcie_apm_stop(trans, false); @@ -1497,9 +1504,9 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, iwl_pcie_synchronize_irqs(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); iwl_pcie_enable_rx_wake(trans, false); @@ -1543,15 +1550,17 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_reset_ict(trans); iwl_enable_interrupts(trans); - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_mac_access_req)); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (ret < 0) { IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); @@ -1562,7 +1571,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, if (!reset) { iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); } else { iwl_trans_pcie_tx_reset(trans); @@ -1939,7 +1948,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, /* this bit wakes up the NIC */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); @@ -1964,8 +1973,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, * and do not save/restore SRAM when power cycling. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + BIT(trans->cfg->csr->flag_val_mac_access_en), + (BIT(trans->cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (unlikely(ret < 0)) { iwl_trans_pcie_dump_regs(trans); @@ -2003,7 +2012,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, goto out; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); /* * Above we read the CSR_GP_CNTRL register, which will flush * any previous writes, but we need the write that clears the @@ -3232,12 +3241,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * id located at the AUX bus MISC address space. 
*/ iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); udelay(2); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (ret < 0) { IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index cf468b9f5a82..1cbb8f470afd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -3,6 +3,7 @@ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -273,7 +274,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", txq_id, reg); iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); txq->need_update = true; return; } @@ -611,7 +612,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); } /* @@ -1171,6 +1172,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, const struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + const struct iwl_cfg *cfg = trans->cfg; int ret; lockdep_assert_held(&trans_pcie->reg_lock); @@ -1188,19 +1190,19 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set. */ - if (trans->cfg->base_params->apmg_wake_up_wa && + if (cfg->base_params->apmg_wake_up_wa && !trans_pcie->cmd_hold_nic_awake) { __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(cfg->csr->flag_mac_access_req)); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + BIT(cfg->csr->flag_val_mac_access_en), + (BIT(cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (ret < 0) { __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(cfg->csr->flag_mac_access_req)); IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); return -EIO; } -- cgit v1.2.3 From 132db31ca9ee305ca1725db361096ab2230511c7 Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Mon, 17 Jul 2017 19:42:35 +0300 Subject: iwlwifi: introduce Image Loader (IML) - new firmware image In future devices a new image will be introduced - IML. The IML, image loader, is loaded by the ROM, and as part of the new self-init flow, loads the rest of the firmware images to the device. Store the image, so the ROM can load it to the device. 
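In driver terms this is one more TLV type in the ucode file whose payload is duplicated and kept for later use. A minimal self-contained sketch of that parse step, with hypothetical types and names (the real handling, using IWL_UCODE_TLV_IML and kmemdup(), is in the iwl-drv.c hunk below):

#include <stdlib.h>
#include <string.h>
#include <stddef.h>

#define DEMO_TLV_IML 52   /* mirrors IWL_UCODE_TLV_IML */

struct demo_fw {
	void   *iml;       /* private copy of the image loader payload */
	size_t  iml_len;
};

/* Store one TLV; returns 0 on success, -1 on allocation failure. */
static int demo_parse_tlv(struct demo_fw *fw, unsigned int type,
			  const void *data, size_t len)
{
	switch (type) {
	case DEMO_TLV_IML:
		fw->iml = malloc(len);          /* kmemdup() equivalent */
		if (!fw->iml)
			return -1;
		memcpy(fw->iml, data, len);
		fw->iml_len = len;
		return 0;
	default:
		return 0;                       /* unknown TLVs are skipped */
	}
}

The copy is released again in iwl_dealloc_ucode(), which the same patch updates.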
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/file.h | 3 +++ drivers/net/wireless/intel/iwlwifi/fw/img.h | 6 ++++++ drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 8 ++++++++ drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 5 +++++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 3 +++ 5 files changed, 25 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 9b2805e1e3b1..9d939cbaf6c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -143,6 +145,7 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, IWL_UCODE_TLV_FW_MEM_SEG = 51, + IWL_UCODE_TLV_IML = 52, }; struct iwl_ucode_tlv { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index b23ffe12ad84..f4912382b6af 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -241,6 +243,8 @@ enum iwl_fw_type { * @ucode_ver: ucode version from the ucode file * @fw_version: firmware version string * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan. + * @iml_len: length of the image loader image + * @iml: image loader fw image * @ucode_capa: capabilities parsed from the ucode file. * @enhance_sensitivity_table: device can do enhanced sensitivity. * @init_evtlog_ptr: event log offset for init ucode. 
@@ -267,6 +271,8 @@ struct iwl_fw { /* ucode images */ struct fw_img img[IWL_UCODE_TYPE_MAX]; + size_t iml_len; + u8 *iml; struct iwl_ucode_capabilities ucode_capa; bool enhance_sensitivity_table; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index aa2d5c14e202..4f451a6f20b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -179,6 +179,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) kfree(drv->fw.dbg_trigger_tlv[i]); kfree(drv->fw.dbg_mem_tlv); + kfree(drv->fw.iml); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); @@ -1126,6 +1127,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, pieces->n_dbg_mem_tlv++; break; } + case IWL_UCODE_TLV_IML: { + drv->fw.iml_len = tlv_len; + drv->fw.iml = kmemdup(tlv_data, tlv_len, GFP_KERNEL); + if (!drv->fw.iml) + return -ENOMEM; + break; + } default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index a92832711683..1b9c627ee34d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -691,6 +691,8 @@ enum iwl_plat_pm_mode { * @wide_cmd_header: true when ucode supports wide command header format * @num_rx_queues: number of RX queues allocated by the transport; * the transport must set this before calling iwl_drv_start() + * @iml_len: the length of the image loader + * @iml: a pointer to the image loader itself * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. * The user should use iwl_trans_{alloc,free}_tx_cmd. * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before @@ -735,6 +737,9 @@ struct iwl_trans { u8 num_rx_queues; + size_t iml_len; + u8 *iml; + /* The following fields are internal only */ struct kmem_cache *dev_cmd_pool; char dev_cmd_pool_name[50]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index de46e6258a5c..ff1e518096c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -739,6 +739,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, sizeof(trans->dbg_conf_tlv)); trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; + trans->iml = mvm->fw->iml; + trans->iml_len = mvm->fw->iml_len; + /* set up notification wait support */ iwl_notification_wait_init(&mvm->notif_wait); -- cgit v1.2.3 From 4efc272a45c112bf539da14d480be0448abe4fac Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 13 Feb 2018 13:52:43 +0200 Subject: iwlwifi: cfg: remove unnecessary cfg data in non-dvm devices The max_data_size and max_inst_size values are only needed for DVM devices. Remove the assignment to those fields in 7000 and newer families so we can also remove the otherwise unnecessary inclusion of iwl-agn.h headers. 
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 3 --- drivers/net/wireless/intel/iwlwifi/cfg/7000.c | 11 ++++------- drivers/net/wireless/intel/iwlwifi/cfg/8000.c | 9 +++------ drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 3 --- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 4 ++-- 5 files changed, 9 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index afc78a1e1cfc..d4ba66aecdc9 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -54,7 +54,6 @@ #include #include #include "iwl-config.h" -#include "iwl-agn-hw.h" /* Highest firmware API version supported */ #define IWL_22000_UCODE_API_MAX 38 @@ -115,8 +114,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_22000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl_22000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index 8e9684e8e603..69bfa827e82a 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -7,8 +7,8 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,8 +35,8 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -70,7 +70,6 @@ #include #include #include "iwl-config.h" -#include "iwl-agn-hw.h" /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 @@ -162,8 +161,6 @@ static const struct iwl_ht_params iwl7000_ht_params = { #define IWL_DEVICE_7000_COMMON \ .device_family = IWL_DEVICE_FAMILY_7000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl7000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index e8299b2ab6bd..7262e973e0d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -7,8 +7,8 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,7 +35,7 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -69,7 +69,6 @@ #include #include #include "iwl-config.h" -#include "iwl-agn-hw.h" /* Highest firmware API version supported */ #define IWL8000_UCODE_API_MAX 36 @@ -142,8 +141,6 @@ static const struct iwl_tt_params iwl8000_tt_params = { #define IWL_DEVICE_8000_COMMON \ .device_family = IWL_DEVICE_FAMILY_8000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index fe376fce3444..9706624911f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -54,7 +54,6 @@ #include #include #include "iwl-config.h" -#include "iwl-agn-hw.h" #include "fw/file.h" /* Highest firmware API version supported */ @@ -135,8 +134,6 @@ static const struct iwl_tt_params iwl9000_tt_params = { .ucode_api_max = IWL9000_UCODE_API_MAX, \ .ucode_api_min = IWL9000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_9000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl9000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_9000, \ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 7f0b7cea8b9b..7752794d0437 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -337,8 +337,8 @@ struct iwl_csr_params { * next step. Supported only in integrated solutions. * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. - * @max_inst_size: The maximal length of the fw inst section - * @max_data_size: The maximal length of the fw data section + * @max_inst_size: The maximal length of the fw inst section (only DVM) + * @max_data_size: The maximal length of the fw data section (only DVM) * @valid_tx_ant: valid transmit antenna * @valid_rx_ant: valid receive antenna * @non_shared_ant: the antenna that is for WiFi only -- cgit v1.2.3 From bf1ad8978bbfd8579dfd10d2e7742d0f554e449a Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Tue, 13 Feb 2018 18:41:46 +0200 Subject: iwlwifi: pcie: allow sending pre-built A-MSDUs In case of A-MSDUs, the trans layer is taking care of building the subframes (out of the given skb), according to the given gso_size. However, in some testing flows, we want to build the whole A-MSDU frame in a different place (e.g. userspace), and ask the driver to send it as-is. In case of gso_size==0, simply treat the frame as normal-frame, although the A-MSDU flag is set. 
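For illustration only (not part of the patch, and with a made-up helper name), a minimal sketch of the resulting decision: the A-MSDU build path is taken only when both the A-MSDU flag and a non-zero gso_size are present, so a pre-built A-MSDU (gso_size == 0) is transmitted as-is.

#include <linux/skbuff.h>

/* Sketch only: decide whether the skb needs A-MSDU subframe building. */
static bool example_needs_amsdu_build(struct sk_buff *skb, bool amsdu_flag)
{
	/* gso_size == 0 means the A-MSDU was already assembled elsewhere */
	return amsdu_flag && skb_shinfo(skb)->gso_size;
}
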
Signed-off-by: Eliad Peller Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 1cbb8f470afd..473fe7ccb07c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -2397,7 +2397,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, goto out_err; iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); - if (amsdu) { + /* + * If gso_size wasn't set, don't give the frame "amsdu treatment" + * (adding subframes, etc.). + * This can happen in some testing flows when the amsdu was already + * pre-built, and we just need to send the resulting skb. + */ + if (amsdu && skb_shinfo(skb)->gso_size) { if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, out_meta, dev_cmd, tb1_len))) -- cgit v1.2.3 From 18e8f43ff13e0dd0a485bc35bea91df37c26c4bb Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Wed, 22 Nov 2017 11:40:15 +0200 Subject: iwlwifi: support new csr addresses for hw address In future devices we use different csr addresses for hw addresses. Update csr addresses to support new and legacy devices. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 20 ++++++++++++++++++-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 14 ++++++++++---- 2 files changed, 28 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 7752794d0437..cfd14e5ea59c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -313,6 +313,10 @@ struct iwl_pwr_tx_backoff { * @flag_master_dis: disable master * @flag_stop_master: stop master * @addr_sw_reset: address for resetting the device + * @mac_addr0_otp: first part of MAC address from OTP + * @mac_addr1_otp: second part of MAC address from OTP + * @mac_addr0_strap: first part of MAC address from strap + * @mac_addr1_strap: second part of MAC address from strap */ struct iwl_csr_params { u8 flag_sw_reset; @@ -323,6 +327,10 @@ struct iwl_csr_params { u8 flag_master_dis; u8 flag_stop_master; u8 addr_sw_reset; + u32 mac_addr0_otp; + u32 mac_addr1_otp; + u32 mac_addr0_strap; + u32 mac_addr1_strap; }; /** @@ -451,7 +459,11 @@ static const struct iwl_csr_params iwl_csr_v1 = { .flag_sw_reset = 7, .flag_master_dis = 8, .flag_stop_master = 9, - .addr_sw_reset = (CSR_BASE + 0x020) + .addr_sw_reset = (CSR_BASE + 0x020), + .mac_addr0_otp = 0x380, + .mac_addr1_otp = 0x384, + .mac_addr0_strap = 0x388, + .mac_addr1_strap = 0x38C }; static const struct iwl_csr_params iwl_csr_v2 = { @@ -462,7 +474,11 @@ static const struct iwl_csr_params iwl_csr_v2 = { .flag_master_dis = 28, .flag_stop_master = 29, .flag_sw_reset = 31, - .addr_sw_reset = (CSR_BASE + 0x024) + .addr_sw_reset = (CSR_BASE + 0x024), + .mac_addr0_otp = 0x30, + .mac_addr1_otp = 0x34, + .mac_addr0_strap = 0x38, + .mac_addr1_strap = 0x3C }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 1f31d7bfdffc..0f9d56420c42 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -579,8 +579,12 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 
mac_addr1, u8 *dest) static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, struct iwl_nvm_data *data) { - __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP)); - __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP)); + __le32 mac_addr0 = + cpu_to_le32(iwl_read32(trans, + trans->cfg->csr->mac_addr0_strap)); + __le32 mac_addr1 = + cpu_to_le32(iwl_read32(trans, + trans->cfg->csr->mac_addr1_strap)); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); /* @@ -590,8 +594,10 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, if (is_valid_ether_addr(data->hw_addr)) return; - mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP)); - mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP)); + mac_addr0 = cpu_to_le32(iwl_read32(trans, + trans->cfg->csr->mac_addr0_otp)); + mac_addr1 = cpu_to_le32(iwl_read32(trans, + trans->cfg->csr->mac_addr1_otp)); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); } -- cgit v1.2.3 From 1e5b7750315a8c2242444acda495555e7d52a6bf Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 20 Feb 2018 16:32:36 +0100 Subject: iwlwifi: mvm: move skb padding reservation earlier Future changes will require moving the HE radiotap data into the SKB head, but this means we need to have the alignment reservation done before that. To prepare, move the alignment reservation earlier here. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 34791628cfb3..bb63e75a9b7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -151,17 +151,9 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { + len -= 2; pad_len = 2; - - /* - * If the device inserted padding it means that (it thought) - * the 802.11 header wasn't a multiple of 4 bytes long. In - * this case, reserve two bytes at the start of the SKB to - * align the payload properly in case we end up copying it. - */ - skb_reserve(skb, pad_len); } - len -= pad_len; /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and @@ -866,6 +858,16 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, return; } + if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { + /* + * If the device inserted padding it means that (it thought) + * the 802.11 header wasn't a multiple of 4 bytes long. In + * this case, reserve two bytes at the start of the SKB to + * align the payload properly in case we end up copying it. + */ + skb_reserve(skb, 2); + } + rx_status = IEEE80211_SKB_RXCB(skb); if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, -- cgit v1.2.3 From 9039d985811d5b109b58b202b7594fd24e433fed Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 13 Feb 2018 11:09:40 +0200 Subject: iwlwifi: fw: harden page loading code The page loading code trusts the data provided in the firmware images a bit too much and may cause a buffer overflow or copy unknown data if the block sizes don't match what we expect. To prevent potential problems, harden the code by checking if the sizes we are copying are what we expect. 
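For illustration only, the shape of the checks added below (the helper name is made up): every copy from the firmware image into a paging block is bounded by the actual section length, and a mismatch aborts instead of copying past the destination.

#include <linux/string.h>
#include <linux/errno.h>

/* Sketch only: copy one firmware block, refusing oversized input. */
static int example_copy_fw_block(void *dst, size_t dst_size,
				 const void *src, size_t src_len)
{
	if (src_len > dst_size)		/* image claims more data than fits */
		return -EINVAL;
	memcpy(dst, src, src_len);	/* copy only what the image provides */
	return 0;
}
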
Cc: stable@vger.kernel.org Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/paging.c | 49 +++++++++++++++++++++----- 1 file changed, 41 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 1fec8e3a6b35..6afcfd1f0eec 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -163,7 +165,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt, static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, const struct fw_img *image) { - int sec_idx, idx; + int sec_idx, idx, ret; u32 offset = 0; /* @@ -190,17 +192,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, */ if (sec_idx >= image->num_sec - 1) { IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n"); - iwl_free_fw_paging(fwrt); - return -EINVAL; + ret = -EINVAL; + goto err; } /* copy the CSS block to the dram */ IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", sec_idx); + if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) { + IWL_ERR(fwrt, "CSS block is larger than paging size\n"); + ret = -EINVAL; + goto err; + } + memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data, - fwrt->fw_paging_db[0].fw_paging_size); + image->sec[sec_idx].len); dma_sync_single_for_device(fwrt->trans->dev, fwrt->fw_paging_db[0].fw_paging_phys, fwrt->fw_paging_db[0].fw_paging_size, @@ -221,6 +229,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) { struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + if (block->fw_paging_size > image->sec[sec_idx].len - offset) { + IWL_ERR(fwrt, + "Paging: paging size is larger than remaining data in block %d\n", + idx); + ret = -EINVAL; + goto err; + } + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, block->fw_paging_size); @@ -231,19 +247,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, IWL_DEBUG_FW(fwrt, "Paging: copied %d paging bytes to block %d\n", - fwrt->fw_paging_db[idx].fw_paging_size, - idx); + block->fw_paging_size, idx); - offset += fwrt->fw_paging_db[idx].fw_paging_size; + offset += block->fw_paging_size; + + if (offset > image->sec[sec_idx].len) { + IWL_ERR(fwrt, + "Paging: offset goes over section size\n"); + ret = -EINVAL; + goto err; + } } /* copy the last paging block */ if (fwrt->num_of_pages_in_last_blk > 0) { struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + if (image->sec[sec_idx].len - offset > block->fw_paging_size) { + IWL_ERR(fwrt, + "Paging: last block is larger than paging size\n"); + ret = -EINVAL; + goto err; + } + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + 
offset, - FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk); + image->sec[sec_idx].len - offset); dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys, block->fw_paging_size, @@ -255,6 +284,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, } return 0; + +err: + iwl_free_fw_paging(fwrt); + return ret; } static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt, -- cgit v1.2.3 From de460ddd8b795ceca8a826771ef8feb016bb438d Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 14 Feb 2018 08:54:40 +0200 Subject: iwlwifi: fw: combine loading of last page block into main copy loop Now that we check and copy only the actual size of the page block, there is no need to treat the last block separately. Remove the mostly duplicate code and make the main copy loop handle also the last block. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/paging.c | 69 ++++++++++---------------- 1 file changed, 26 insertions(+), 43 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 6afcfd1f0eec..9b8dd7fe7112 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -221,25 +221,39 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, sec_idx++; /* - * copy the paging blocks to the dram - * loop index start from 1 since that CSS block already copied to dram - * and CSS index is 0. - * loop stop at num_of_paging_blk since that last block is not full. + * Copy the paging blocks to the dram. The loop index starts + * from 1 since the CSS block (index 0) was already copied to + * dram. We use num_of_paging_blk + 1 to account for that. */ - for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) { + for (idx = 1; idx < fwrt->num_of_paging_blk + 1; idx++) { struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; - - if (block->fw_paging_size > image->sec[sec_idx].len - offset) { + int remaining = image->sec[sec_idx].len - offset; + int len = block->fw_paging_size; + + /* + * For the last block, we copy all that is remaining, + * for all other blocks, we copy fw_paging_size at a + * time. 
*/ + if (idx == fwrt->num_of_paging_blk) { + len = remaining; + if (remaining != + fwrt->num_of_pages_in_last_blk * FW_PAGING_SIZE) { + IWL_ERR(fwrt, + "Paging: last block contains more data than expected %d\n", + remaining); + ret = -EINVAL; + goto err; + } + } else if (block->fw_paging_size > remaining) { IWL_ERR(fwrt, - "Paging: paging size is larger than remaining data in block %d\n", - idx); + "Paging: not enough data in other in block %d (%d)\n", + idx, remaining); ret = -EINVAL; goto err; } memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - block->fw_paging_size); + image->sec[sec_idx].data + offset, len); dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys, block->fw_paging_size, @@ -247,40 +261,9 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, IWL_DEBUG_FW(fwrt, "Paging: copied %d paging bytes to block %d\n", - block->fw_paging_size, idx); + len, idx); offset += block->fw_paging_size; - - if (offset > image->sec[sec_idx].len) { - IWL_ERR(fwrt, - "Paging: offset goes over section size\n"); - ret = -EINVAL; - goto err; - } - } - - /* copy the last paging block */ - if (fwrt->num_of_pages_in_last_blk > 0) { - struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; - - if (image->sec[sec_idx].len - offset > block->fw_paging_size) { - IWL_ERR(fwrt, - "Paging: last block is larger than paging size\n"); - ret = -EINVAL; - goto err; - } - - memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - image->sec[sec_idx].len - offset); - dma_sync_single_for_device(fwrt->trans->dev, - block->fw_paging_phys, - block->fw_paging_size, - DMA_BIDIRECTIONAL); - - IWL_DEBUG_FW(fwrt, - "Paging: copied %d pages in the last block %d\n", - fwrt->num_of_pages_in_last_blk, idx); } return 0; -- cgit v1.2.3 From 49564a806fc5551ef22140e9aa4e29c6788d3eb0 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 12 Dec 2017 08:58:41 +0200 Subject: iwlwifi: pcie: remove non-responsive device If we fail to to grab NIC access because the device is not responding (i.e. CSR_GP_CNTRL returns 0xFFFFFFFF), remove the device from the PCI bus, to avoid any further damage, and to let the user space rescan. In order to inform the userspace that a rescan is needed, we send a kobject uevent with "INACCESSIBLE". This functionality is disabled by default, but can be enabled via a new module parameter called "remove_when_gone". In the future we may change this module parameter to include 3 modes instead: do nothing; auto-rescan or; send uevent. 
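For illustration only, a condensed sketch of the detection added below (identifiers are taken from the diff, error handling omitted): a CSR read that returns all-ones means the device has dropped off the bus, so the driver schedules the removal worker instead of forcing an NMI; the behaviour is opt-in via the new remove_when_gone module parameter.

	/* Condensed from iwl_trans_pcie_grab_nic_access(); not complete code. */
	u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);

	if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
		/* reads from a vanished PCIe device return 0xFFFFFFFF */
		removal->pdev = to_pci_dev(trans->dev);
		INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
		schedule_work(&removal->work);	/* uevent + unbind in process context */
	} else {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
	}
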
Signed-off-by: Luca Coelho Signed-off-by: Rajat Jain --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 6 ++ drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 2 + drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 5 ++ drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 74 +++++++++++++++++++++- 4 files changed, 84 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 4f451a6f20b9..c59ce4f8a5ed 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1850,3 +1850,9 @@ MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)"); module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444); MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)"); + +module_param_named(remove_when_gone, + iwlwifi_mod_params.remove_when_gone, bool, + 0444); +MODULE_PARM_DESC(remove_when_gone, + "Remove dev from PCIe bus if it is deemed inaccessible (default: false)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index a41c46e63eb1..a7dd8a8cddf9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -122,6 +122,7 @@ enum iwl_uapsd_disable { * @lar_disable: disable LAR (regulatory), default = 0 * @fw_monitor: allow to use firmware monitor * @disable_11ac: disable VHT capabilities, default = false. + * @remove_when_gone: remove an inaccessible device from the PCIe bus. */ struct iwl_mod_params { int swcrypto; @@ -143,6 +144,7 @@ struct iwl_mod_params { bool lar_disable; bool fw_monitor; bool disable_11ac; + bool remove_when_gone; }; #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index cda66340d357..45ea32796cda 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -383,6 +383,8 @@ struct iwl_self_init_dram { * @hw_init_mask: initial unmasked hw causes * @fh_mask: current unmasked fh causes * @hw_mask: current unmasked hw causes + * @in_rescan: true if we have triggered a device rescan + * @scheduled_for_removal: true if we have scheduled a device removal */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -464,6 +466,9 @@ struct iwl_trans_pcie { u32 fh_mask; u32 hw_mask; cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES]; + u16 tx_cmd_queue_size; + bool in_rescan; + bool scheduled_for_removal; }; static inline struct iwl_trans_pcie * diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 623476603cd4..6e9a9ecfb11c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -75,6 +75,7 @@ #include #include #include +#include #include "iwl-drv.h" #include "iwl-trans.h" @@ -1935,6 +1936,29 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) clear_bit(STATUS_TPOWER_PMI, &trans->status); } +struct iwl_trans_pcie_removal { + struct pci_dev *pdev; + struct work_struct work; +}; + +static void iwl_trans_pcie_removal_wk(struct work_struct *wk) +{ + struct iwl_trans_pcie_removal *removal = + container_of(wk, struct iwl_trans_pcie_removal, work); + struct pci_dev *pdev = removal->pdev; + char *prop[] = {"EVENT=INACCESSIBLE", NULL}; + + dev_err(&pdev->dev, "Device 
gone - attempting removal\n"); + kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); + pci_lock_rescan_remove(); + pci_dev_put(pdev); + pci_stop_and_remove_bus_device(pdev); + pci_unlock_rescan_remove(); + + kfree(removal); + module_put(THIS_MODULE); +} + static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, unsigned long *flags) { @@ -1977,11 +2001,55 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, (BIT(trans->cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (unlikely(ret < 0)) { - iwl_trans_pcie_dump_regs(trans); - iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); + u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); + WARN_ONCE(1, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", - iwl_read32(trans, CSR_GP_CNTRL)); + cntrl); + + iwl_trans_pcie_dump_regs(trans); + + if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) { + struct iwl_trans_pcie_removal *removal; + + if (trans_pcie->scheduled_for_removal) + goto err; + + IWL_ERR(trans, "Device gone - scheduling removal!\n"); + + /* + * get a module reference to avoid doing this + * while unloading anyway and to avoid + * scheduling a work with code that's being + * removed. + */ + if (!try_module_get(THIS_MODULE)) { + IWL_ERR(trans, + "Module is being unloaded - abort\n"); + goto err; + } + + removal = kzalloc(sizeof(*removal), GFP_ATOMIC); + if (!removal) { + module_put(THIS_MODULE); + goto err; + } + /* + * we don't need to clear this flag, because + * the trans will be freed and reallocated. + */ + trans_pcie->scheduled_for_removal = true; + + removal->pdev = to_pci_dev(trans->dev); + INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); + pci_dev_get(removal->pdev); + schedule_work(&removal->work); + } else { + iwl_write32(trans, CSR_RESET, + CSR_RESET_REG_FLAG_FORCE_NMI); + } + +err: spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); return false; } -- cgit v1.2.3 From c3039b10df74818bdd1d7e9d2b8706c855c4394b Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 22 Feb 2018 14:50:06 +0200 Subject: iwlwifi: make bitfield a u32 instead of u16 The bitfield we use in struct iwl_cfg contains 17 bits, but we declare it as u16. The compiler doesn't seem to have any problems with that (it probably automatically makes it u32), but it's cleaner to use a type that is long enough for all the flags. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index cfd14e5ea59c..c503b26793f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -420,7 +420,7 @@ struct iwl_cfg { u32 soc_latency; u16 nvm_ver; u16 nvm_calib_ver; - u16 rx_with_siso_diversity:1, + u32 rx_with_siso_diversity:1, bt_shared_single_ant:1, internal_wimax_coex:1, host_interrupt_operation_mode:1, -- cgit v1.2.3 From 755a654cada277b1851737845f63c7ab2059c982 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 4 Feb 2018 15:01:37 +0200 Subject: iwlwifi: mvm: remove check for non low latency TIDs Firmware will only send non low-latency TIDs in the bitmap, so the check is now redundant. 
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index d0916f2552e2..df4c60496f72 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -803,7 +803,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) || - tid_to_mac80211_ac[tid] < IEEE80211_AC_BE || !(mvmsta->amsdu_enabled & BIT(tid))) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); -- cgit v1.2.3 From d3a6f7fb97fc34a38e40cc56392e701598f99863 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Thu, 22 Feb 2018 18:45:49 +0200 Subject: iwlwifi: mvm: set wakeup filters for wowlan "any" configuration In case of "any" wowlan trigger is configured, no valid wakeup filter was configured. Moreover, the fw assumes there's no connection when there are no configured wakeup filters. This leads to the station info not being updated on D3 command, causing rate_n_flags to be 0 when the offloading code sends tx frame (triggering SYSASSERT_102C due to invalid antenna param) Note: "any" trigger is currently assumed to only be used when entering d0i3 (which has a different flow). However, we still reach this code when using d3_test. Signed-off-by: Eliad Peller Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 80a9a7cb83be..3fcf489f3120 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -693,6 +690,14 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, IWL_WOWLAN_WAKEUP_LINK_CHANGE); } + if (wowlan->any) { + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | + IWL_WOWLAN_WAKEUP_LINK_CHANGE | + IWL_WOWLAN_WAKEUP_RX_FRAME | + IWL_WOWLAN_WAKEUP_BCN_FILTERING); + } + return 0; } -- cgit v1.2.3 From 2f177c1628c3f54cdfcc8093cd4b5297f4baeec4 Mon Sep 17 00:00:00 2001 From: Venkateswara Naralasetty Date: Wed, 25 Apr 2018 11:36:40 +0300 Subject: ath10k: fix information leak in debugfs During write to some of debugfs in ath10k, few variables exposing stack data when process user input. which leads to possible information leak. This patch fix this issue by initializing buffer and checks the return valure of 'simple_write_to_buffer'. Signed-off-by: Venkateswara Naralasetty Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 20 +++++++++--------- drivers/net/wireless/ath/ath10k/debugfs_sta.c | 30 +++++++++++++-------------- 2 files changed, 25 insertions(+), 25 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index bac832ce1873..57d22cd976e5 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -987,13 +987,13 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file, { struct ath10k *ar = file->private_data; int res; - char buf[64]; + char buf[64] = {0}; unsigned int amsdu, ampdu; - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); - - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = 0; + res = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (res <= 0) + return res; res = sscanf(buf, "%u %u", &amsdu, &du); @@ -1043,14 +1043,14 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file, { struct ath10k *ar = file->private_data; int ret; - char buf[96]; + char buf[96] = {0}; unsigned int log_level; u64 mask; - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); - - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = 0; + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; ret = sscanf(buf, "%llx %u", &mask, &log_level); diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index 8f688f136c22..a63c97e2c50c 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -254,12 +254,12 @@ static ssize_t ath10k_dbg_sta_write_addba(struct file *file, struct ath10k *ar = arsta->arvif->ar; u32 tid, buf_size; int ret; - char buf[64]; + char buf[64] = {0}; - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); - - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = '\0'; + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; ret = sscanf(buf, "%u %u", &tid, &buf_size); if (ret != 2) @@ -305,12 +305,12 @@ static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file, struct ath10k *ar = arsta->arvif->ar; u32 tid, status; int ret; - char buf[64]; - - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); + char buf[64] = {0}; - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = '\0'; + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, 
count); + if (ret <= 0) + return ret; ret = sscanf(buf, "%u %u", &tid, &status); if (ret != 2) @@ -355,12 +355,12 @@ static ssize_t ath10k_dbg_sta_write_delba(struct file *file, struct ath10k *ar = arsta->arvif->ar; u32 tid, initiator, reason; int ret; - char buf[64]; - - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); + char buf[64] = {0}; - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = '\0'; + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason); if (ret != 3) -- cgit v1.2.3 From 4b190675ad06f5a6ecbeef0b01890c5fb372e3eb Mon Sep 17 00:00:00 2001 From: Tamizh Chelvam Date: Wed, 25 Apr 2018 11:36:44 +0300 Subject: ath10k: fix kernel panic while reading tpc_stats When attempt to read tpc_stats for the chipsets which support more than 3 tx chain will trigger kernel panic(kernel stack is corrupted) due to writing values on rate_code array out of range. This patch changes the array size depends on the WMI_TPC_TX_N_CHAIN and added check to avoid write values on the array if the num tx chain get in tpc config event is greater than WMI_TPC_TX_N_CHAIN. Tested on QCA9984 with firmware-5.bin_10.4-3.5.3-00057 Kernel panic log : [ 323.510944] Kernel panic - not syncing: stack-protector: Kernel stack is corrupted in: bf90c654 [ 323.510944] [ 323.524390] CPU: 0 PID: 1908 Comm: cat Not tainted 3.14.77 #31 [ 323.530224] [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [ 323.537941] [] (show_stack) from [] (dump_stack+0x80/0xa0) [ 323.545146] [] (dump_stack) from [] (panic+0x84/0x1e4) [ 323.552000] [] (panic) from [] (__stack_chk_fail+0x10/0x14) [ 323.559350] [] (__stack_chk_fail) from [] (ath10k_wmi_event_pdev_tpc_config+0x424/0x438 [ath10k_core]) [ 323.570471] [] (ath10k_wmi_event_pdev_tpc_config [ath10k_core]) from [] (ath10k_wmi_10_4_op_rx+0x2f0/0x39c [ath10k_core]) [ 323.583047] [] (ath10k_wmi_10_4_op_rx [ath10k_core]) from [] (ath10k_htc_rx_completion_handler+0x170/0x1a0 [ath10k_core]) [ 323.595702] [] (ath10k_htc_rx_completion_handler [ath10k_core]) from [] (ath10k_pci_hif_send_complete_check+0x1f0/0x220 [ath10k_pci]) [ 323.609421] [] (ath10k_pci_hif_send_complete_check [ath10k_pci]) from [] (ath10k_ce_per_engine_service+0x74/0xc4 [ath10k_pci]) [ 323.622490] [] (ath10k_ce_per_engine_service [ath10k_pci]) from [] (ath10k_ce_per_engine_service_any+0x74/0x80 [ath10k_pci]) [ 323.635423] [] (ath10k_ce_per_engine_service_any [ath10k_pci]) from [] (ath10k_pci_napi_poll+0x44/0xe8 [ath10k_pci]) [ 323.647665] [] (ath10k_pci_napi_poll [ath10k_pci]) from [] (net_rx_action+0xac/0x160) [ 323.657208] [] (net_rx_action) from [] (__do_softirq+0x104/0x294) [ 323.665017] [] (__do_softirq) from [] (irq_exit+0x9c/0x11c) [ 323.672314] [] (irq_exit) from [] (handle_IRQ+0x6c/0x90) [ 323.679341] [] (handle_IRQ) from [] (gic_handle_irq+0x3c/0x60) [ 323.686893] [] (gic_handle_irq) from [] (__irq_svc+0x40/0x70) [ 323.694349] Exception stack(0xdd489c58 to 0xdd489ca0) [ 323.699384] 9c40: 00000000 a0000013 [ 323.707547] 9c60: 00000000 dc4bce40 60000013 ddc1d800 dd488000 00000990 00000000 c085c800 [ 323.715707] 9c80: 00000000 dd489d44 0000092d dd489ca0 c026e664 c026e668 60000013 ffffffff [ 323.723877] [] (__irq_svc) from [] (rcu_note_context_switch+0x170/0x184) [ 323.732298] [] (rcu_note_context_switch) from [] (__schedule+0x50/0x4d4) [ 323.740716] [] (__schedule) from [] (schedule_timeout+0x148/0x178) [ 323.748611] [] (schedule_timeout) from [] (wait_for_common+0x114/0x154) 
[ 323.756972] [] (wait_for_common) from [] (ath10k_tpc_stats_open+0xc8/0x340 [ath10k_core]) [ 323.766873] [] (ath10k_tpc_stats_open [ath10k_core]) from [] (do_dentry_open+0x1ac/0x274) [ 323.776741] [] (do_dentry_open) from [] (do_last+0x8c0/0xb08) [ 323.784201] [] (do_last) from [] (path_openat+0x210/0x598) [ 323.791408] [] (path_openat) from [] (do_filp_open+0x2c/0x78) [ 323.798873] [] (do_filp_open) from [] (do_sys_open+0x114/0x1b4) [ 323.806509] [] (do_sys_open) from [] (ret_fast_syscall+0x0/0x44) [ 323.814241] CPU1: stopping [ 323.816927] CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.14.77 #31 [ 323.823008] [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [ 323.830731] [] (show_stack) from [] (dump_stack+0x80/0xa0) [ 323.837934] [] (dump_stack) from [] (handle_IPI+0xb8/0x140) [ 323.845224] [] (handle_IPI) from [] (gic_handle_irq+0x58/0x60) [ 323.852774] [] (gic_handle_irq) from [] (__irq_svc+0x40/0x70) [ 323.860233] Exception stack(0xdd499fa0 to 0xdd499fe8) [ 323.865273] 9fa0: ffffffed 00000000 1d3c9000 00000000 dd498000 dd498030 10c0387d c08b62c8 [ 323.873432] 9fc0: 4220406a 512f04d0 00000000 00000000 00000001 dd499fe8 c021838c c0218390 [ 323.881588] 9fe0: 60000013 ffffffff [ 323.885070] [] (__irq_svc) from [] (arch_cpu_idle+0x30/0x50) [ 323.892454] [] (arch_cpu_idle) from [] (cpu_startup_entry+0xa4/0x108) [ 323.900690] [] (cpu_startup_entry) from [<422085a4>] (0x422085a4) Signed-off-by: Tamizh chelvam Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 8 +++++++- drivers/net/wireless/ath/ath10k/wmi.c | 6 ++++++ drivers/net/wireless/ath/ath10k/wmi.h | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 57d22cd976e5..0d98c93a3aba 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1519,7 +1519,13 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, *len += scnprintf(buf + *len, buf_len - *len, "********************************\n"); *len += scnprintf(buf + *len, buf_len - *len, - "No. Preamble Rate_code tpc_value1 tpc_value2 tpc_value3\n"); + "No. 
Preamble Rate_code "); + + for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++) + *len += scnprintf(buf + *len, buf_len - *len, + "tpc_value%d ", i); + + *len += scnprintf(buf + *len, buf_len - *len, "\n"); for (i = 0; i < tpc_stats->rate_max; i++) { *len += scnprintf(buf + *len, buf_len - *len, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index df2e92a6c9bd..d7dba1791a1a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4484,6 +4484,12 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { + ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n", + num_tx_chain, WMI_TPC_TX_N_CHAIN); + return; + } + ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, num_tx_chain); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 16a39244a34f..700b12f46baf 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -4026,9 +4026,9 @@ struct wmi_pdev_get_tpc_config_cmd { } __packed; #define WMI_TPC_CONFIG_PARAM 1 -#define WMI_TPC_RATE_MAX 160 #define WMI_TPC_FINAL_RATE_MAX 240 #define WMI_TPC_TX_N_CHAIN 4 +#define WMI_TPC_RATE_MAX (WMI_TPC_TX_N_CHAIN * 65) #define WMI_TPC_PREAM_TABLE_MAX 10 #define WMI_TPC_FLAG 3 #define WMI_TPC_BUF_SIZE 10 -- cgit v1.2.3 From 378b1d65070f3c989a662555eda8564cdd937b4a Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 24 Apr 2018 15:18:59 +0200 Subject: ath6kl: fix ath6kl_data_tx()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. 
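For illustration only (the names below are made up and not from ath6kl), a minimal ndo_start_xmit with the correct signature:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical example: note the netdev_tx_t return type. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* a real driver would queue the skb to hardware here */
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;	/* NETDEV_TX_BUSY asks the core to retry */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit	= example_start_xmit,
};
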
Signed-off-by: Luc Van Oostenryck Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/core.h | 2 +- drivers/net/wireless/ath/ath6kl/txrx.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index e23d450babd2..0d30e762c090 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -914,7 +914,7 @@ void ath6kl_tx_data_cleanup(struct ath6kl *ar); struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar); void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie); -int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev); struct aggr_info *aggr_init(struct ath6kl_vif *vif); void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info, diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index 8da9506f8c2b..618d12ed4b40 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -353,7 +353,7 @@ fail_ctrl_tx: return status; } -int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) { struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl_cookie *cookie = NULL; -- cgit v1.2.3 From 785281342d0ce06ec04e4b730fb29b6320139edc Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 26 Apr 2018 10:12:44 +0100 Subject: ath10k: fix spelling mistake: "servive" -> "service" Trivial fix to spelling mistake in ath10k_warn warning message text Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index d7dba1791a1a..bb8e9e259a56 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -5286,7 +5286,7 @@ void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb) ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg); if (ret) { - ath10k_warn(ar, "failed to parse servive available event: %d\n", + ath10k_warn(ar, "failed to parse service available event: %d\n", ret); } -- cgit v1.2.3 From e60a92590187d01f1c98254d2d7e8f613ebd31dd Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Thu, 26 Apr 2018 14:35:02 +0200 Subject: ath10k: sdio: jump to correct label in error handling path Jump to the correct label in error handling path. At this point of execution create_singlethread_workqueue() has succeeded, so it should be properly destroyed. Jump label was renamed in commit ec2c64e20257 ("ath10k: sdio: fix memory leak for probe allocations"). However, the bug was originally introduced in commit d96db25d2025 ("ath10k: add initial SDIO support"). 
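For illustration only (hypothetical names, not ath10k code), the unwind pattern this fix restores: once a resource such as the workqueue exists, every later failure must jump to a label that releases it along with everything acquired before it.

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_priv {
	struct workqueue_struct *wq;
	void *buf;
};

/* Hypothetical init path showing goto-based error unwinding. */
static int example_init(struct example_priv *priv)
{
	priv->wq = create_singlethread_workqueue("example_wq");
	if (!priv->wq)
		return -ENOMEM;	/* nothing to undo yet */

	priv->buf = kzalloc(64, GFP_KERNEL);
	if (!priv->buf)
		goto err_destroy_wq;	/* the workqueue already exists */

	return 0;

err_destroy_wq:
	destroy_workqueue(priv->wq);
	return -ENOMEM;
}
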
Fixes: d96db25d2025 ("ath10k: add initial SDIO support") Signed-off-by: Niklas Cassel Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 2d04c54a4153..d612ce8c9cff 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -2011,7 +2011,7 @@ static int ath10k_sdio_probe(struct sdio_func *func, ret = -ENODEV; ath10k_err(ar, "unsupported device id %u (0x%x)\n", dev_id_base, id->device); - goto err_core_destroy; + goto err_free_wq; } ar->id.vendor = id->vendor; -- cgit v1.2.3 From c59530d0d5dccc96795af12c139f618182cf98db Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 25 Apr 2018 12:12:47 -0700 Subject: net: Move PHY statistics code into PHY library helpers In order to make it possible for network device drivers that do not necessarily have a phy_device attached, but still report PHY statistics, have a preliminary refactoring consisting in creating helper functions that encapsulate the PHY device driver knowledge within PHYLIB. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/phy.h | 20 ++++++++++++++++++++ net/core/ethtool.c | 38 ++++++++------------------------------ 3 files changed, 76 insertions(+), 30 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 05c1e8ef15e6..a98ed12c0009 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1277,3 +1277,51 @@ int phy_ethtool_nway_reset(struct net_device *ndev) return phy_restart_aneg(phydev); } EXPORT_SYMBOL(phy_ethtool_nway_reset); + +int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) +{ + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_strings(phydev, data); + mutex_unlock(&phydev->lock); + + return 0; +} +EXPORT_SYMBOL(phy_ethtool_get_strings); + +int phy_ethtool_get_sset_count(struct phy_device *phydev) +{ + int ret; + + if (!phydev->drv) + return -EIO; + + if (phydev->drv->get_sset_count && + phydev->drv->get_strings && + phydev->drv->get_stats) { + mutex_lock(&phydev->lock); + ret = phydev->drv->get_sset_count(phydev); + mutex_unlock(&phydev->lock); + + return ret; + } + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(phy_ethtool_get_sset_count); + +int phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_stats(phydev, stats, data); + mutex_unlock(&phydev->lock); + + return 0; +} +EXPORT_SYMBOL(phy_ethtool_get_stats); diff --git a/include/linux/phy.h b/include/linux/phy.h index f0b5870a6d40..6ca81395c545 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1066,6 +1066,26 @@ int phy_ethtool_nway_reset(struct net_device *ndev); #if IS_ENABLED(CONFIG_PHYLIB) int __init mdio_bus_init(void); void mdio_bus_exit(void); +int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); +int phy_ethtool_get_sset_count(struct phy_device *phydev); +int phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data); +#else +int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) +{ + return -EOPNOTSUPP; +} + +int phy_ethtool_get_sset_count(struct phy_device *phydev) +{ + return -EOPNOTSUPP; +} + +int 
phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + return -EOPNOTSUPP; +} #endif extern struct bus_type mdio_bus_type; diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 4650fd6d678c..efcd8e620796 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -211,23 +211,6 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr) return ret; } -static int phy_get_sset_count(struct phy_device *phydev) -{ - int ret; - - if (phydev->drv->get_sset_count && - phydev->drv->get_strings && - phydev->drv->get_stats) { - mutex_lock(&phydev->lock); - ret = phydev->drv->get_sset_count(phydev); - mutex_unlock(&phydev->lock); - - return ret; - } - - return -EOPNOTSUPP; -} - static int __ethtool_get_sset_count(struct net_device *dev, int sset) { const struct ethtool_ops *ops = dev->ethtool_ops; @@ -246,7 +229,7 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset) if (sset == ETH_SS_PHY_STATS) { if (dev->phydev) - return phy_get_sset_count(dev->phydev); + return phy_ethtool_get_sset_count(dev->phydev); else return -EOPNOTSUPP; } @@ -273,15 +256,10 @@ static void __ethtool_get_strings(struct net_device *dev, else if (stringset == ETH_SS_PHY_TUNABLES) memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings)); else if (stringset == ETH_SS_PHY_STATS) { - struct phy_device *phydev = dev->phydev; - - if (phydev) { - mutex_lock(&phydev->lock); - phydev->drv->get_strings(phydev, data); - mutex_unlock(&phydev->lock); - } else { + if (dev->phydev) + phy_ethtool_get_strings(dev->phydev, data); + else return; - } } else /* ops->get_strings is valid because checked earlier */ ops->get_strings(dev, stringset, data); @@ -2002,7 +1980,7 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) if (!phydev) return -EOPNOTSUPP; - n_stats = phy_get_sset_count(phydev); + n_stats = phy_ethtool_get_sset_count(dev->phydev); if (n_stats < 0) return n_stats; if (n_stats > S32_MAX / sizeof(u64)) @@ -2017,9 +1995,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) if (n_stats && !data) return -ENOMEM; - mutex_lock(&phydev->lock); - phydev->drv->get_stats(phydev, &stats, data); - mutex_unlock(&phydev->lock); + ret = phy_ethtool_get_stats(dev->phydev, &stats, data); + if (ret < 0) + return ret; ret = -EFAULT; if (copy_to_user(useraddr, &stats, sizeof(stats))) -- cgit v1.2.3 From 89f09048348936a9a8c5131c8538cc6ed26fd44c Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 25 Apr 2018 12:12:50 -0700 Subject: net: dsa: Pass stringset to ethtool operations Up until now we largely assumed that we were interested in ETH_SS_STATS type of strings for all ethtool operations, this is about to change with the introduction of additional string sets, e.g: ETH_SS_PHY_STATS. Update all functions to take an appropriate stringset argument and act on it when it is different than ETH_SS_STATS for now. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 11 +++++++++-- drivers/net/dsa/b53/b53_priv.h | 5 +++-- drivers/net/dsa/dsa_loop.c | 11 +++++++++-- drivers/net/dsa/lan9303-core.c | 11 +++++++++-- drivers/net/dsa/microchip/ksz_common.c | 11 +++++++++-- drivers/net/dsa/mt7530.c | 11 +++++++++-- drivers/net/dsa/mv88e6xxx/chip.c | 10 ++++++++-- drivers/net/dsa/qca8k.c | 10 ++++++++-- include/net/dsa.h | 5 +++-- net/dsa/master.c | 21 +++++++++++++-------- net/dsa/slave.c | 5 +++-- 11 files changed, 83 insertions(+), 28 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 78616787f2a3..726b2d8c6fe9 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -806,13 +806,17 @@ static unsigned int b53_get_mib_size(struct b53_device *dev) return B53_MIBS_SIZE; } -void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) { struct b53_device *dev = ds->priv; const struct b53_mib_desc *mibs = b53_get_mib(dev); unsigned int mib_size = b53_get_mib_size(dev); unsigned int i; + if (stringset != ETH_SS_STATS) + return; + for (i = 0; i < mib_size; i++) strlcpy(data + i * ETH_GSTRING_LEN, mibs[i].name, ETH_GSTRING_LEN); @@ -852,10 +856,13 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) } EXPORT_SYMBOL(b53_get_ethtool_stats); -int b53_get_sset_count(struct dsa_switch *ds, int port) +int b53_get_sset_count(struct dsa_switch *ds, int port, int sset) { struct b53_device *dev = ds->priv; + if (sset != ETH_SS_STATS) + return 0; + return b53_get_mib_size(dev); } EXPORT_SYMBOL(b53_get_sset_count); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 1187ebd79287..b933d5cb5c2d 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -286,9 +286,10 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev) /* Exported functions towards other drivers */ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port); int b53_configure_vlan(struct dsa_switch *ds); -void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); +void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data); void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); -int b53_get_sset_count(struct dsa_switch *ds, int port); +int b53_get_sset_count(struct dsa_switch *ds, int port, int sset); int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index f77be9f85cb3..9354cc08d3fd 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -86,16 +86,23 @@ static int dsa_loop_setup(struct dsa_switch *ds) return 0; } -static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port) +static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS) + return 0; + return __DSA_LOOP_CNT_MAX; } -static void dsa_loop_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +static void dsa_loop_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data) { struct dsa_loop_priv *ps = ds->priv; unsigned int i; + if (stringset != ETH_SS_STATS) + return; + for (i = 0; i < __DSA_LOOP_CNT_MAX; i++) 
memcpy(data + i * ETH_GSTRING_LEN, ps->ports[port].mib[i].name, ETH_GSTRING_LEN); diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index fefa454f3e56..b4f6e1a67dd9 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -977,10 +977,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = { { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", }, }; -static void lan9303_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +static void lan9303_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data) { unsigned int u; + if (stringset != ETH_SS_STATS) + return; + for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name, ETH_GSTRING_LEN); @@ -1007,8 +1011,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, } } -static int lan9303_get_sset_count(struct dsa_switch *ds, int port) +static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS) + return 0; + return ARRAY_SIZE(lan9303_mib); } diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index bcb3e6c734f2..7210c49b7922 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -439,15 +439,22 @@ static void ksz_disable_port(struct dsa_switch *ds, int port, ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true); } -static int ksz_sset_count(struct dsa_switch *ds, int port) +static int ksz_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS) + return 0; + return TOTAL_SWITCH_COUNTER_NUM; } -static void ksz_get_strings(struct dsa_switch *ds, int port, uint8_t *buf) +static void ksz_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *buf) { int i; + if (stringset != ETH_SS_STATS) + return; + for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, ETH_GSTRING_LEN); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 80a4dbc3a499..62e486652e62 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -573,10 +573,14 @@ static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum, } static void -mt7530_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) { int i; + if (stringset != ETH_SS_STATS) + return; + for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name, ETH_GSTRING_LEN); @@ -604,8 +608,11 @@ mt7530_get_ethtool_stats(struct dsa_switch *ds, int port, } static int -mt7530_get_sset_count(struct dsa_switch *ds, int port) +mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS) + return 0; + return ARRAY_SIZE(mt7530_mib); } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 3d2091099f7f..8f92ccc0dd54 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -742,11 +742,14 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data) } static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, - uint8_t *data) + u32 stringset, uint8_t *data) { struct mv88e6xxx_chip *chip = ds->priv; int count = 0; + if (stringset != ETH_SS_STATS) + return; + mutex_lock(&chip->reg_lock); if (chip->info->ops->stats_get_strings) @@ -789,12 +792,15 @@ static int 
mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip) STATS_TYPE_BANK1); } -static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port) +static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port, int sset) { struct mv88e6xxx_chip *chip = ds->priv; int serdes_count = 0; int count = 0; + if (sset != ETH_SS_STATS) + return 0; + mutex_lock(&chip->reg_lock); if (chip->info->ops->stats_get_sset_count) count = chip->info->ops->stats_get_sset_count(chip); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 600d5ad1fbde..757b6d90ea36 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -600,10 +600,13 @@ qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) } static void -qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { int i; + if (stringset != ETH_SS_STATS) + return; + for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, ETH_GSTRING_LEN); @@ -631,8 +634,11 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, } static int -qca8k_get_sset_count(struct dsa_switch *ds, int port) +qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS) + return 0; + return ARRAY_SIZE(ar8327_mib); } diff --git a/include/net/dsa.h b/include/net/dsa.h index 60fb4ec8ba61..0bc0aad1b02e 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -356,10 +356,11 @@ struct dsa_switch_ops { /* * ethtool hardware statistics. */ - void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); + void (*get_strings)(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data); void (*get_ethtool_stats)(struct dsa_switch *ds, int port, uint64_t *data); - int (*get_sset_count)(struct dsa_switch *ds, int port); + int (*get_sset_count)(struct dsa_switch *ds, int port, int sset); /* * ethtool Wake-on-LAN diff --git a/net/dsa/master.c b/net/dsa/master.c index 9ec16b39ed15..8d27687fd0ca 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -38,11 +38,14 @@ static int dsa_master_get_sset_count(struct net_device *dev, int sset) struct dsa_switch *ds = cpu_dp->ds; int count = 0; - if (ops->get_sset_count) - count += ops->get_sset_count(dev, sset); + if (ops->get_sset_count) { + count = ops->get_sset_count(dev, sset); + if (count < 0) + count = 0; + } - if (sset == ETH_SS_STATS && ds->ops->get_sset_count) - count += ds->ops->get_sset_count(ds, cpu_dp->index); + if (ds->ops->get_sset_count) + count += ds->ops->get_sset_count(ds, cpu_dp->index, sset); return count; } @@ -65,18 +68,20 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset, pfx[sizeof(pfx) - 1] = '_'; if (ops->get_sset_count && ops->get_strings) { - mcount = ops->get_sset_count(dev, ETH_SS_STATS); + mcount = ops->get_sset_count(dev, stringset); + if (mcount < 0) + mcount = 0; ops->get_strings(dev, stringset, data); } - if (stringset == ETH_SS_STATS && ds->ops->get_strings) { + if (ds->ops->get_strings) { ndata = data + mcount * len; /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle * the output after to prepend our CPU port prefix we * constructed earlier */ - ds->ops->get_strings(ds, port, ndata); - count = ds->ops->get_sset_count(ds, port); + ds->ops->get_strings(ds, port, stringset, ndata); + count = ds->ops->get_sset_count(ds, port, stringset); for (i = 0; i < count; i++) { memmove(ndata + (i * len + sizeof(pfx)), ndata + i * len, len - sizeof(pfx)); diff --git 
a/net/dsa/slave.c b/net/dsa/slave.c index 18561af7a8f1..f3fb3a0880b1 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -560,7 +560,8 @@ static void dsa_slave_get_strings(struct net_device *dev, strncpy(data + 2 * len, "rx_packets", len); strncpy(data + 3 * len, "rx_bytes", len); if (ds->ops->get_strings) - ds->ops->get_strings(ds, dp->index, data + 4 * len); + ds->ops->get_strings(ds, dp->index, stringset, + data + 4 * len); } } @@ -605,7 +606,7 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset) count = 4; if (ds->ops->get_sset_count) - count += ds->ops->get_sset_count(ds, dp->index); + count += ds->ops->get_sset_count(ds, dp->index, sset); return count; } -- cgit v1.2.3 From c7d28c9df292a49904446dca15b2037ee8f874af Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 25 Apr 2018 12:12:53 -0700 Subject: net: dsa: b53: Add support for reading PHY statistics Allow the b53 driver to return PHY statistics when the CPU port used is different than 5, 7 or 8, because those are typically PHY-less on most devices. This is useful for debugging link problems between the switch and an external host when using a non standard CPU port number (e.g: 4). Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 78 ++++++++++++++++++++++++++++++++++------ drivers/net/dsa/b53/b53_priv.h | 1 + drivers/net/dsa/bcm_sf2.c | 1 + 3 files changed, 70 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 726b2d8c6fe9..9f561fe505cb 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -806,20 +806,39 @@ static unsigned int b53_get_mib_size(struct b53_device *dev) return B53_MIBS_SIZE; } +static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) +{ + /* These ports typically do not have built-in PHYs */ + switch (port) { + case B53_CPU_PORT_25: + case 7: + case B53_CPU_PORT: + return NULL; + } + + return mdiobus_get_phy(ds->slave_mii_bus, port); +} + void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { struct b53_device *dev = ds->priv; const struct b53_mib_desc *mibs = b53_get_mib(dev); unsigned int mib_size = b53_get_mib_size(dev); + struct phy_device *phydev; unsigned int i; - if (stringset != ETH_SS_STATS) - return; + if (stringset == ETH_SS_STATS) { + for (i = 0; i < mib_size; i++) + strlcpy(data + i * ETH_GSTRING_LEN, + mibs[i].name, ETH_GSTRING_LEN); + } else if (stringset == ETH_SS_PHY_STATS) { + phydev = b53_get_phy_device(ds, port); + if (!phydev) + return; - for (i = 0; i < mib_size; i++) - strlcpy(data + i * ETH_GSTRING_LEN, - mibs[i].name, ETH_GSTRING_LEN); + phy_ethtool_get_strings(phydev, data); + } } EXPORT_SYMBOL(b53_get_strings); @@ -856,14 +875,34 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) } EXPORT_SYMBOL(b53_get_ethtool_stats); +void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + struct phy_device *phydev; + + phydev = b53_get_phy_device(ds, port); + if (!phydev) + return; + + phy_ethtool_get_stats(phydev, NULL, data); +} +EXPORT_SYMBOL(b53_get_ethtool_phy_stats); + int b53_get_sset_count(struct dsa_switch *ds, int port, int sset) { struct b53_device *dev = ds->priv; + struct phy_device *phydev; - if (sset != ETH_SS_STATS) - return 0; + if (sset == ETH_SS_STATS) { + return b53_get_mib_size(dev); + } else if (sset == ETH_SS_PHY_STATS) { + phydev = b53_get_phy_device(ds, port); + if 
(!phydev) + return 0; + + return phy_ethtool_get_sset_count(phydev); + } - return b53_get_mib_size(dev); + return 0; } EXPORT_SYMBOL(b53_get_sset_count); @@ -1484,7 +1523,7 @@ void b53_br_fast_age(struct dsa_switch *ds, int port) } EXPORT_SYMBOL(b53_br_fast_age); -static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port) +static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) { /* Broadcom switches will accept enabling Broadcom tags on the * following ports: 5, 7 and 8, any other port is not supported @@ -1496,10 +1535,19 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port) return true; } - dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", port); return false; } +static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port) +{ + bool ret = b53_possible_cpu_port(ds, port); + + if (!ret) + dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", + port); + return ret; +} + enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; @@ -1657,6 +1705,7 @@ static const struct dsa_switch_ops b53_switch_ops = { .get_strings = b53_get_strings, .get_ethtool_stats = b53_get_ethtool_stats, .get_sset_count = b53_get_sset_count, + .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, .phy_read = b53_phy_read16, .phy_write = b53_phy_write16, .adjust_link = b53_adjust_link, @@ -1961,6 +2010,15 @@ static int b53_switch_init(struct b53_device *dev) dev->num_ports = dev->cpu_port + 1; dev->enabled_ports |= BIT(dev->cpu_port); + /* Include non standard CPU port built-in PHYs to be probed */ + if (is539x(dev) || is531x5(dev)) { + for (i = 0; i < dev->num_ports; i++) { + if (!(dev->ds->phys_mii_mask & BIT(i)) && + !b53_possible_cpu_port(dev->ds, i)) + dev->ds->phys_mii_mask |= BIT(i); + } + } + dev->ports = devm_kzalloc(dev->dev, sizeof(struct b53_port) * dev->num_ports, GFP_KERNEL); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index b933d5cb5c2d..cc284a514de9 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -290,6 +290,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data); void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_get_sset_count(struct dsa_switch *ds, int port, int sset); +void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 0378eded31f2..97236cfcbae4 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -859,6 +859,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .get_strings = b53_get_strings, .get_ethtool_stats = b53_get_ethtool_stats, .get_sset_count = b53_get_sset_count, + .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, .get_phy_flags = bcm_sf2_sw_get_phy_flags, .adjust_link = bcm_sf2_sw_adjust_link, .fixed_link_update = bcm_sf2_sw_fixed_link_update, -- cgit v1.2.3 From 96cbddcd52e76d9052948e408b17bedc8aa1c11a Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 25 Apr 2018 12:12:54 -0700 Subject: net: dsa: loop: Hook PHY statistics We just return the same statistics through ethtool_get_stats() and ethtool_get_phy_stats() for simplicity since this is just a mock-up driver. 
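
For reference, the stringset-aware hooks added in the patches above follow the same pattern in every DSA driver touched by this series: report a count and names only for the stringsets the driver actually implements, and return 0 or do nothing for anything else. A minimal sketch of that pattern for a hypothetical driver follows; it is illustrative only and not part of any patch here. The foo_ names, the mib_names[] table and the N_MIB constant are invented placeholders, while the two hook signatures match the ones added to struct dsa_switch_ops above.

/* Illustrative sketch only -- not taken from the patches above.
 * mib_names[] and N_MIB stand in for a driver's real MIB table.
 */
#include <linux/ethtool.h>
#include <linux/string.h>
#include <net/dsa.h>

#define N_MIB 4	/* placeholder: number of counters this driver exposes */

static const char mib_names[N_MIB][ETH_GSTRING_LEN] = {
	"RxGoodPkts", "RxCrcErrors", "TxGoodPkts", "TxCollisions",
};

static int foo_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	/* Only claim counters for the stringsets we actually provide. */
	if (sset != ETH_SS_STATS)
		return 0;

	return N_MIB;
}

static void foo_get_strings(struct dsa_switch *ds, int port,
			    u32 stringset, uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < N_MIB; i++)
		strncpy(data + i * ETH_GSTRING_LEN, mib_names[i],
			ETH_GSTRING_LEN);
}

static const struct dsa_switch_ops foo_switch_ops = {
	.get_sset_count	= foo_get_sset_count,
	.get_strings	= foo_get_strings,
	/* A real driver also fills in .get_ethtool_stats and, where the
	 * ports have built-in PHYs, .get_ethtool_phy_stats as done for
	 * b53 above.
	 */
};
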
Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/dsa_loop.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 9354cc08d3fd..58f14af04639 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -88,7 +88,7 @@ static int dsa_loop_setup(struct dsa_switch *ds) static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset) { - if (sset != ETH_SS_STATS) + if (sset != ETH_SS_STATS && sset != ETH_SS_PHY_STATS) return 0; return __DSA_LOOP_CNT_MAX; @@ -100,7 +100,7 @@ static void dsa_loop_get_strings(struct dsa_switch *ds, int port, struct dsa_loop_priv *ps = ds->priv; unsigned int i; - if (stringset != ETH_SS_STATS) + if (stringset != ETH_SS_STATS && stringset != ETH_SS_PHY_STATS) return; for (i = 0; i < __DSA_LOOP_CNT_MAX; i++) @@ -263,6 +263,7 @@ static const struct dsa_switch_ops dsa_loop_driver = { .get_strings = dsa_loop_get_strings, .get_ethtool_stats = dsa_loop_get_ethtool_stats, .get_sset_count = dsa_loop_get_sset_count, + .get_ethtool_phy_stats = dsa_loop_get_ethtool_stats, .phy_read = dsa_loop_phy_read, .phy_write = dsa_loop_phy_write, .port_bridge_join = dsa_loop_port_bridge_join, -- cgit v1.2.3 From 1ecd6e8ad9964273d2b540196784bc3c9b17053b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 27 Apr 2018 13:11:25 -0400 Subject: phy: Temporary build fix after phylib changes. Make PHYLIB boolean, because we reference phylib provided symbols now from net/core/ethtool.c and therefore 'm' doesn't work. Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index edb8b9ab827f..7c5e8c1e9370 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -9,7 +9,6 @@ menuconfig MDIO_DEVICE config MDIO_BUS tristate - default m if PHYLIB=m default MDIO_DEVICE help This internal symbol is used for link time dependencies and it @@ -171,7 +170,7 @@ config PHYLINK autonegotiation modes. menuconfig PHYLIB - tristate "PHY Device support and infrastructure" + bool "PHY Device support and infrastructure" depends on NETDEVICES select MDIO_DEVICE help -- cgit v1.2.3 From f058ca6b06d4c4685201de3760b1962641aad445 Mon Sep 17 00:00:00 2001 From: Pradeep Nalla Date: Wed, 25 Apr 2018 17:00:12 -0700 Subject: liquidio: move a couple of functions to lio_core.c To support the next patch in this series which has code that calls octnet_get_link_stats from two different .c files, move that function (and its dependency octnet_nic_stats_callback) to lio_core.c. Remove octnet_get_link_stats's static declaration and add its function prototype in octeon_network.h. Signed-off-by: Pradeep Nalla Acked-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 152 ++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 157 --------------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 2 + 3 files changed, 154 insertions(+), 157 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 2a94eee943b2..334a068b6a62 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -1169,3 +1169,155 @@ int lio_wait_for_clean_oq(struct octeon_device *oct) return pending_pkts; } + +static void +octnet_nic_stats_callback(struct octeon_device *oct_dev, + u32 status, void *ptr) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; + struct oct_nic_stats_resp *resp = + (struct oct_nic_stats_resp *)sc->virtrptr; + struct oct_nic_stats_ctrl *ctrl = + (struct oct_nic_stats_ctrl *)sc->ctxptr; + struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; + struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; + struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; + struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost; + + if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) { + octeon_swap_8B_data((u64 *)&resp->stats, + (sizeof(struct oct_link_stats)) >> 3); + + /* RX link-level stats */ + rstats->total_rcvd = rsp_rstats->total_rcvd; + rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; + rstats->total_bcst = rsp_rstats->total_bcst; + rstats->total_mcst = rsp_rstats->total_mcst; + rstats->runts = rsp_rstats->runts; + rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; + /* Accounts for over/under-run of buffers */ + rstats->fifo_err = rsp_rstats->fifo_err; + rstats->dmac_drop = rsp_rstats->dmac_drop; + rstats->fcs_err = rsp_rstats->fcs_err; + rstats->jabber_err = rsp_rstats->jabber_err; + rstats->l2_err = rsp_rstats->l2_err; + rstats->frame_err = rsp_rstats->frame_err; + rstats->red_drops = rsp_rstats->red_drops; + + /* RX firmware stats */ + rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; + rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; + rstats->fw_err_pko = rsp_rstats->fw_err_pko; + rstats->fw_err_link = rsp_rstats->fw_err_link; + rstats->fw_err_drop = rsp_rstats->fw_err_drop; + rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan; + rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err; + + /* Number of packets that are LROed */ + rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; + /* Number of octets that are LROed */ + rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; + /* Number of LRO packets formed */ + rstats->fw_total_lro = rsp_rstats->fw_total_lro; + /* Number of times lRO of packet aborted */ + rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; + rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; + rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; + rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; + rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; + /* intrmod: packet forward rate */ + rstats->fwd_rate = rsp_rstats->fwd_rate; + + /* TX link-level stats */ + tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; + tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; + tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; + tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; + tstats->ctl_sent = rsp_tstats->ctl_sent; + /* Packets sent after one collision*/ + tstats->one_collision_sent = rsp_tstats->one_collision_sent; + /* Packets sent after multiple 
collision*/ + tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; + /* Packets not sent due to max collisions */ + tstats->max_collision_fail = rsp_tstats->max_collision_fail; + /* Packets not sent due to max deferrals */ + tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; + /* Accounts for over/under-run of buffers */ + tstats->fifo_err = rsp_tstats->fifo_err; + tstats->runts = rsp_tstats->runts; + /* Total number of collisions detected */ + tstats->total_collisions = rsp_tstats->total_collisions; + + /* firmware stats */ + tstats->fw_total_sent = rsp_tstats->fw_total_sent; + tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; + tstats->fw_err_pko = rsp_tstats->fw_err_pko; + tstats->fw_err_pki = rsp_tstats->fw_err_pki; + tstats->fw_err_link = rsp_tstats->fw_err_link; + tstats->fw_err_drop = rsp_tstats->fw_err_drop; + tstats->fw_tso = rsp_tstats->fw_tso; + tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; + tstats->fw_err_tso = rsp_tstats->fw_err_tso; + tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan; + + resp->status = 1; + } else { + resp->status = -1; + } + complete(&ctrl->complete); +} + +int octnet_get_link_stats(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_soft_command *sc; + struct oct_nic_stats_ctrl *ctrl; + struct oct_nic_stats_resp *resp; + int retval; + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_nic_stats_resp), + sizeof(struct octnic_ctrl_pkt)); + + if (!sc) + return -ENOMEM; + + resp = (struct oct_nic_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + + ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; + memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); + ctrl->netdev = netdev; + init_completion(&ctrl->complete); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_PORT_STATS, 0, 0, 0); + + sc->callback = octnet_nic_stats_callback; + sc->callback_arg = sc; + sc->wait_time = 500; /*in milli seconds*/ + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + return -EINVAL; + } + + wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); + + if (resp->status != 1) { + octeon_free_soft_command(oct_dev, sc); + + return -EINVAL; + } + + octeon_free_soft_command(oct_dev, sc); + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 9926a12dd805..351549c83472 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -32,7 +32,6 @@ #include "cn23xx_vf_device.h" static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); -static int octnet_get_link_stats(struct net_device *netdev); struct oct_intrmod_context { int octeon_id; @@ -1776,162 +1775,6 @@ static int octnet_set_intrmod_cfg(struct lio *lio, return -EINTR; } -static void -octnet_nic_stats_callback(struct octeon_device *oct_dev, - u32 status, void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct oct_nic_stats_resp *resp = - (struct oct_nic_stats_resp *)sc->virtrptr; - struct oct_nic_stats_ctrl *ctrl = - (struct oct_nic_stats_ctrl *)sc->ctxptr; - struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; - struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; - - struct 
nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; - struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost; - - if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) { - octeon_swap_8B_data((u64 *)&resp->stats, - (sizeof(struct oct_link_stats)) >> 3); - - /* RX link-level stats */ - rstats->total_rcvd = rsp_rstats->total_rcvd; - rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; - rstats->total_bcst = rsp_rstats->total_bcst; - rstats->total_mcst = rsp_rstats->total_mcst; - rstats->runts = rsp_rstats->runts; - rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; - /* Accounts for over/under-run of buffers */ - rstats->fifo_err = rsp_rstats->fifo_err; - rstats->dmac_drop = rsp_rstats->dmac_drop; - rstats->fcs_err = rsp_rstats->fcs_err; - rstats->jabber_err = rsp_rstats->jabber_err; - rstats->l2_err = rsp_rstats->l2_err; - rstats->frame_err = rsp_rstats->frame_err; - rstats->red_drops = rsp_rstats->red_drops; - - /* RX firmware stats */ - rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; - rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; - rstats->fw_err_pko = rsp_rstats->fw_err_pko; - rstats->fw_err_link = rsp_rstats->fw_err_link; - rstats->fw_err_drop = rsp_rstats->fw_err_drop; - rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan; - rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err; - - /* Number of packets that are LROed */ - rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; - /* Number of octets that are LROed */ - rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; - /* Number of LRO packets formed */ - rstats->fw_total_lro = rsp_rstats->fw_total_lro; - /* Number of times lRO of packet aborted */ - rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; - rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; - rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; - rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; - rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; - /* intrmod: packet forward rate */ - rstats->fwd_rate = rsp_rstats->fwd_rate; - - /* TX link-level stats */ - tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; - tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; - tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; - tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; - tstats->ctl_sent = rsp_tstats->ctl_sent; - /* Packets sent after one collision*/ - tstats->one_collision_sent = rsp_tstats->one_collision_sent; - /* Packets sent after multiple collision*/ - tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; - /* Packets not sent due to max collisions */ - tstats->max_collision_fail = rsp_tstats->max_collision_fail; - /* Packets not sent due to max deferrals */ - tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; - /* Accounts for over/under-run of buffers */ - tstats->fifo_err = rsp_tstats->fifo_err; - tstats->runts = rsp_tstats->runts; - /* Total number of collisions detected */ - tstats->total_collisions = rsp_tstats->total_collisions; - - /* firmware stats */ - tstats->fw_total_sent = rsp_tstats->fw_total_sent; - tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; - tstats->fw_err_pko = rsp_tstats->fw_err_pko; - tstats->fw_err_pki = rsp_tstats->fw_err_pki; - tstats->fw_err_link = rsp_tstats->fw_err_link; - tstats->fw_err_drop = rsp_tstats->fw_err_drop; - tstats->fw_tso = rsp_tstats->fw_tso; - tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; - tstats->fw_err_tso = rsp_tstats->fw_err_tso; - tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan; - - resp->status = 1; - } else { - resp->status = -1; - } - complete(&ctrl->complete); 
-} - -/* Configure interrupt moderation parameters */ -static int octnet_get_link_stats(struct net_device *netdev) -{ - struct lio *lio = GET_LIO(netdev); - struct octeon_device *oct_dev = lio->oct_dev; - - struct octeon_soft_command *sc; - struct oct_nic_stats_ctrl *ctrl; - struct oct_nic_stats_resp *resp; - - int retval; - - /* Alloc soft command */ - sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct_dev, - 0, - sizeof(struct oct_nic_stats_resp), - sizeof(struct octnic_ctrl_pkt)); - - if (!sc) - return -ENOMEM; - - resp = (struct oct_nic_stats_resp *)sc->virtrptr; - memset(resp, 0, sizeof(struct oct_nic_stats_resp)); - - ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; - memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); - ctrl->netdev = netdev; - init_completion(&ctrl->complete); - - sc->iq_no = lio->linfo.txpciq[0].s.q_no; - - octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, - OPCODE_NIC_PORT_STATS, 0, 0, 0); - - sc->callback = octnet_nic_stats_callback; - sc->callback_arg = sc; - sc->wait_time = 500; /*in milli seconds*/ - - retval = octeon_send_soft_command(oct_dev, sc); - if (retval == IQ_SEND_FAILED) { - octeon_free_soft_command(oct_dev, sc); - return -EINVAL; - } - - wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); - - if (resp->status != 1) { - octeon_free_soft_command(oct_dev, sc); - - return -EINVAL; - } - - octeon_free_soft_command(oct_dev, sc); - - return 0; -} - static int lio_get_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *intr_coal) { diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 4069710796a8..d090edd1a4a5 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -190,6 +190,8 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); +int octnet_get_link_stats(struct net_device *netdev); + int lio_wait_for_clean_oq(struct octeon_device *oct); /** * \brief Register ethtool operations -- cgit v1.2.3 From 80002347d6f51c45e49eb545ec7ae7077d46faf8 Mon Sep 17 00:00:00 2001 From: Pradeep Nalla Date: Wed, 25 Apr 2018 17:00:22 -0700 Subject: liquidio: add support for ndo_get_stats64 instead of ndo_get_stats Support ndo_get_stats64 instead of ndo_get_stats. Also add stats for multicast and broadcast packets. Signed-off-by: Pradeep Nalla Acked-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 4 + drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 98 +++++++++++++--------- drivers/net/ethernet/cavium/liquidio/lio_main.c | 52 ++++++++---- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 47 +++++++---- .../net/ethernet/cavium/liquidio/liquidio_common.h | 5 ++ 5 files changed, 135 insertions(+), 71 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 334a068b6a62..844e288d60fe 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -1207,6 +1207,8 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, /* RX firmware stats */ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; + rstats->fw_total_mcast = rsp_rstats->fw_total_mcast; + rstats->fw_total_bcast = rsp_rstats->fw_total_bcast; rstats->fw_err_pko = rsp_rstats->fw_err_pko; rstats->fw_err_link = rsp_rstats->fw_err_link; rstats->fw_err_drop = rsp_rstats->fw_err_drop; @@ -1251,6 +1253,8 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, /* firmware stats */ tstats->fw_total_sent = rsp_tstats->fw_total_sent; tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; + tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent; + tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent; tstats->fw_err_pko = rsp_tstats->fw_err_pko; tstats->fw_err_pki = rsp_tstats->fw_err_pki; tstats->fw_err_link = rsp_tstats->fw_err_link; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 351549c83472..64c817a63a01 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -112,6 +112,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "tx_tso_err", "tx_vxlan", + "tx_mcast", + "tx_bcast", + "mac_tx_total_pkts", "mac_tx_total_bytes", "mac_tx_mcast_pkts", @@ -127,6 +130,8 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "rx_total_rcvd", "rx_total_fwd", + "rx_mcast", + "rx_bcast", "rx_jabber_err", "rx_l2_err", "rx_frame_err", @@ -171,6 +176,10 @@ static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { "tx_errors", "rx_dropped", "tx_dropped", + "rx_mcast", + "tx_mcast", + "rx_bcast", + "tx_bcast", "link_state_changes", }; @@ -1056,50 +1065,48 @@ lio_get_ethtool_stats(struct net_device *netdev, { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; - struct net_device_stats *netstats = &netdev->stats; + struct rtnl_link_stats64 lstats; int i = 0, j; if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) return; - netdev->netdev_ops->ndo_get_stats(netdev); - octnet_get_link_stats(netdev); - + netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ - data[i++] = CVM_CAST64(netstats->rx_packets); + data[i++] = lstats.rx_packets; /*sum of oct->instr_queue[iq_no]->stats.tx_done */ - data[i++] = CVM_CAST64(netstats->tx_packets); + data[i++] = lstats.tx_packets; /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ - data[i++] = CVM_CAST64(netstats->rx_bytes); + data[i++] = lstats.rx_bytes; /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ - data[i++] = CVM_CAST64(netstats->tx_bytes); - data[i++] = CVM_CAST64(netstats->rx_errors + - oct_dev->link_stats.fromwire.fcs_err + - oct_dev->link_stats.fromwire.jabber_err + - 
oct_dev->link_stats.fromwire.l2_err + - oct_dev->link_stats.fromwire.frame_err); - data[i++] = CVM_CAST64(netstats->tx_errors); + data[i++] = lstats.tx_bytes; + data[i++] = lstats.rx_errors + + oct_dev->link_stats.fromwire.fcs_err + + oct_dev->link_stats.fromwire.jabber_err + + oct_dev->link_stats.fromwire.l2_err + + oct_dev->link_stats.fromwire.frame_err; + data[i++] = lstats.tx_errors; /*sum of oct->droq[oq_no]->stats->rx_dropped + *oct->droq[oq_no]->stats->dropped_nodispatch + *oct->droq[oq_no]->stats->dropped_toomany + *oct->droq[oq_no]->stats->dropped_nomem */ - data[i++] = CVM_CAST64(netstats->rx_dropped + - oct_dev->link_stats.fromwire.fifo_err + - oct_dev->link_stats.fromwire.dmac_drop + - oct_dev->link_stats.fromwire.red_drops + - oct_dev->link_stats.fromwire.fw_err_pko + - oct_dev->link_stats.fromwire.fw_err_link + - oct_dev->link_stats.fromwire.fw_err_drop); + data[i++] = lstats.rx_dropped + + oct_dev->link_stats.fromwire.fifo_err + + oct_dev->link_stats.fromwire.dmac_drop + + oct_dev->link_stats.fromwire.red_drops + + oct_dev->link_stats.fromwire.fw_err_pko + + oct_dev->link_stats.fromwire.fw_err_link + + oct_dev->link_stats.fromwire.fw_err_drop; /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ - data[i++] = CVM_CAST64(netstats->tx_dropped + - oct_dev->link_stats.fromhost.max_collision_fail + - oct_dev->link_stats.fromhost.max_deferral_fail + - oct_dev->link_stats.fromhost.total_collisions + - oct_dev->link_stats.fromhost.fw_err_pko + - oct_dev->link_stats.fromhost.fw_err_link + - oct_dev->link_stats.fromhost.fw_err_drop + - oct_dev->link_stats.fromhost.fw_err_pki); + data[i++] = lstats.tx_dropped + + oct_dev->link_stats.fromhost.max_collision_fail + + oct_dev->link_stats.fromhost.max_deferral_fail + + oct_dev->link_stats.fromhost.total_collisions + + oct_dev->link_stats.fromhost.fw_err_pko + + oct_dev->link_stats.fromhost.fw_err_link + + oct_dev->link_stats.fromhost.fw_err_drop + + oct_dev->link_stats.fromhost.fw_err_pki; /* firmware tx stats */ /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 
@@ -1134,6 +1141,10 @@ lio_get_ethtool_stats(struct net_device *netdev, */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); + /* Multicast packets sent by this port */ + data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; + data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; + /* mac tx statistics */ /*CVMX_BGXX_CMRX_TX_STAT5 */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); @@ -1170,6 +1181,9 @@ lio_get_ethtool_stats(struct net_device *netdev, *fw_total_fwd */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); + /* Multicast packets received on this port */ + data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; + data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ @@ -1338,7 +1352,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, __attribute__((unused)), u64 *data) { - struct net_device_stats *netstats = &netdev->stats; + struct rtnl_link_stats64 lstats; struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; int i = 0, j, vj; @@ -1346,25 +1360,31 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) return; - netdev->netdev_ops->ndo_get_stats(netdev); + netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ - data[i++] = CVM_CAST64(netstats->rx_packets); + data[i++] = lstats.rx_packets; /* sum of oct->instr_queue[iq_no]->stats.tx_done */ - data[i++] = CVM_CAST64(netstats->tx_packets); + data[i++] = lstats.tx_packets; /* sum of oct->droq[oq_no]->stats->rx_bytes_received */ - data[i++] = CVM_CAST64(netstats->rx_bytes); + data[i++] = lstats.rx_bytes; /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ - data[i++] = CVM_CAST64(netstats->tx_bytes); - data[i++] = CVM_CAST64(netstats->rx_errors); - data[i++] = CVM_CAST64(netstats->tx_errors); + data[i++] = lstats.tx_bytes; + data[i++] = lstats.rx_errors; + data[i++] = lstats.tx_errors; /* sum of oct->droq[oq_no]->stats->rx_dropped + * oct->droq[oq_no]->stats->dropped_nodispatch + * oct->droq[oq_no]->stats->dropped_toomany + * oct->droq[oq_no]->stats->dropped_nomem */ - data[i++] = CVM_CAST64(netstats->rx_dropped); + data[i++] = lstats.rx_dropped; /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ - data[i++] = CVM_CAST64(netstats->tx_dropped); + data[i++] = lstats.tx_dropped; + + data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; + data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; + data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; + data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; + /* lio->link_changes */ data[i++] = CVM_CAST64(lio->link_changes); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index f3891ae11b02..fe3edf13c8f0 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2252,14 +2252,11 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) return 0; } -/** - * \brief Net device get_stats - * @param netdev network device - */ -static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) +static void +liquidio_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *lstats) { struct lio *lio = 
GET_LIO(netdev); - struct net_device_stats *stats = &netdev->stats; struct octeon_device *oct; u64 pkts = 0, drop = 0, bytes = 0; struct oct_droq_stats *oq_stats; @@ -2269,7 +2266,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) oct = lio->oct_dev; if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) - return stats; + return; for (i = 0; i < oct->num_iqs; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; @@ -2279,9 +2276,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes += iq_stats->tx_tot_bytes; } - stats->tx_packets = pkts; - stats->tx_bytes = bytes; - stats->tx_dropped = drop; + lstats->tx_packets = pkts; + lstats->tx_bytes = bytes; + lstats->tx_dropped = drop; pkts = 0; drop = 0; @@ -2298,11 +2295,34 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes += oq_stats->rx_bytes_received; } - stats->rx_bytes = bytes; - stats->rx_packets = pkts; - stats->rx_dropped = drop; - - return stats; + lstats->rx_bytes = bytes; + lstats->rx_packets = pkts; + lstats->rx_dropped = drop; + + octnet_get_link_stats(netdev); + lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; + lstats->collisions = oct->link_stats.fromhost.total_collisions; + + /* detailed rx_errors: */ + lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; + /* recved pkt with crc error */ + lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; + /* recv'd frame alignment error */ + lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; + /* recv'r fifo overrun */ + lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err; + + lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + + lstats->rx_frame_errors + lstats->rx_fifo_errors; + + /* detailed tx_errors */ + lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; + lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; + lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err; + + lstats->tx_errors = lstats->tx_aborted_errors + + lstats->tx_carrier_errors + + lstats->tx_fifo_errors; } /** @@ -3355,7 +3375,7 @@ static const struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, .ndo_start_xmit = liquidio_xmit, - .ndo_get_stats = liquidio_get_stats, + .ndo_get_stats64 = liquidio_get_stats64, .ndo_set_mac_address = liquidio_set_mac, .ndo_set_rx_mode = liquidio_set_mcast_list, .ndo_tx_timeout = liquidio_tx_timeout, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index f92dfa411de6..b7b91d15ce3f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1336,24 +1336,21 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) return 0; } -/** - * \brief Net device get_stats - * @param netdev network device - */ -static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) +static void +liquidio_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *lstats) { struct lio *lio = GET_LIO(netdev); - struct net_device_stats *stats = &netdev->stats; + struct octeon_device *oct; u64 pkts = 0, drop = 0, bytes = 0; struct oct_droq_stats *oq_stats; struct oct_iq_stats *iq_stats; - struct octeon_device *oct; int i, iq_no, oq_no; oct = lio->oct_dev; if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) - return stats; + return; for (i = 0; i < oct->num_iqs; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; @@ -1363,9 +1360,9 @@ static struct 
net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes += iq_stats->tx_tot_bytes; } - stats->tx_packets = pkts; - stats->tx_bytes = bytes; - stats->tx_dropped = drop; + lstats->tx_packets = pkts; + lstats->tx_bytes = bytes; + lstats->tx_dropped = drop; pkts = 0; drop = 0; @@ -1382,11 +1379,29 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes += oq_stats->rx_bytes_received; } - stats->rx_bytes = bytes; - stats->rx_packets = pkts; - stats->rx_dropped = drop; + lstats->rx_bytes = bytes; + lstats->rx_packets = pkts; + lstats->rx_dropped = drop; + + octnet_get_link_stats(netdev); + lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; + + /* detailed rx_errors: */ + lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; + /* recved pkt with crc error */ + lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; + /* recv'd frame alignment error */ + lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; + + lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + + lstats->rx_frame_errors; + + /* detailed tx_errors */ + lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; + lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; - return stats; + lstats->tx_errors = lstats->tx_aborted_errors + + lstats->tx_carrier_errors; } /** @@ -2034,7 +2049,7 @@ static const struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, .ndo_start_xmit = liquidio_xmit, - .ndo_get_stats = liquidio_get_stats, + .ndo_get_stats64 = liquidio_get_stats64, .ndo_set_mac_address = liquidio_set_mac, .ndo_set_rx_mode = liquidio_set_mcast_list, .ndo_tx_timeout = liquidio_tx_timeout, diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 34a94daca590..2166744b6812 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -802,6 +802,9 @@ struct nic_rx_stats { u64 fw_total_rcvd; u64 fw_total_fwd; u64 fw_total_fwd_bytes; + u64 fw_total_mcast; + u64 fw_total_bcast; + u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; @@ -858,6 +861,8 @@ struct nic_tx_stats { u64 fw_total_sent; u64 fw_total_fwd; u64 fw_total_fwd_bytes; + u64 fw_total_mcast_sent; + u64 fw_total_bcast_sent; u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; -- cgit v1.2.3 From 094be0927ff3e802b3f26b7f66c563a4b93a4ab5 Mon Sep 17 00:00:00 2001 From: Tobias Regnery Date: Thu, 26 Apr 2018 12:36:36 +0200 Subject: geneve: fix build with modular IPV6 Commit c40e89fd358e ("geneve: configure MTU based on a lower device") added an IS_ENABLED(CONFIG_IPV6) to geneve, leading to the following link error with CONFIG_GENEVE=y and CONFIG_IPV6=m: drivers/net/geneve.o: In function `geneve_link_config': geneve.c:(.text+0x14c): undefined reference to `rt6_lookup' Fix this by adding a Kconfig dependency and forcing GENEVE to be a module when IPV6 is a module. Fixes: c40e89fd358e ("geneve: configure MTU based on a lower device") Signed-off-by: Tobias Regnery Signed-off-by: David S. 
Miller --- drivers/net/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 891846655000..a029b27fd002 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -198,6 +198,7 @@ config VXLAN config GENEVE tristate "Generic Network Virtualization Encapsulation" depends on INET && NET_UDP_TUNNEL + depends on IPV6 || !IPV6 select NET_IP_TUNNEL select GRO_CELLS ---help--- -- cgit v1.2.3 From 51dce24bcdbdc493a87a17bcaf898b1f1d2fa600 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 26 Apr 2018 08:08:09 -0700 Subject: net: intel: Cleanup the copyright/license headers After many years of having a ~30 line copyright and license header to our source files, we are finally able to reduce that to one line with the advent of the SPDX identifier. Also caught a few files missing the SPDX license identifier, so fixed them up. Signed-off-by: Jeff Kirsher Acked-by: Shannon Nelson Acked-by: Richard Cochran Tested-by: Andrew Bowers Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/e100.c | 28 +------------------- drivers/net/ethernet/intel/e1000/Makefile | 26 ------------------- drivers/net/ethernet/intel/e1000/e1000.h | 29 +-------------------- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 23 +---------------- drivers/net/ethernet/intel/e1000/e1000_hw.c | 28 +------------------- drivers/net/ethernet/intel/e1000/e1000_hw.h | 28 +------------------- drivers/net/ethernet/intel/e1000/e1000_main.c | 28 +------------------- drivers/net/ethernet/intel/e1000/e1000_osdep.h | 29 +-------------------- drivers/net/ethernet/intel/e1000/e1000_param.c | 28 +------------------- drivers/net/ethernet/intel/e1000e/80003es2lan.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/80003es2lan.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/82571.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/82571.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/Makefile | 27 +------------------ drivers/net/ethernet/intel/e1000e/defines.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/e1000.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/ethtool.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/hw.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/ich8lan.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/ich8lan.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/mac.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/mac.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/manage.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/manage.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/netdev.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/nvm.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/nvm.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/param.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/phy.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/phy.h | 21 +-------------- drivers/net/ethernet/intel/e1000e/ptp.c | 21 +-------------- drivers/net/ethernet/intel/e1000e/regs.h | 21 +-------------- drivers/net/ethernet/intel/fm10k/Makefile | 23 +---------------- drivers/net/ethernet/intel/fm10k/fm10k.h | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_common.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_common.h | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 20 +-------------- 
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 20 +-------------- drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 20 +-------------- drivers/net/ethernet/intel/i40e/Makefile | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_adminq.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_adminq.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_alloc.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_client.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_client.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_common.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_dcb.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_dcb.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_devids.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_diag.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_diag.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_hmc.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_hmc.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_main.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_nvm.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_osdep.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_prototype.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_ptp.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_register.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_status.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_trace.h | 23 +---------------- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_txrx.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_type.h | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 26 +------------------ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/Makefile | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 26 +------------------ 
.../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_common.c | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_devids.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_register.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_status.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_trace.h | 23 +---------------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40e_type.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40evf.h | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40evf_client.c | 2 ++ drivers/net/ethernet/intel/i40evf/i40evf_client.h | 2 ++ drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 26 +------------------ drivers/net/ethernet/intel/i40evf/i40evf_main.c | 26 +------------------ .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 26 +------------------ drivers/net/ethernet/intel/igb/Makefile | 28 +------------------- drivers/net/ethernet/intel/igb/e1000_82575.c | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_82575.h | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_defines.h | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_hw.h | 22 +--------------- drivers/net/ethernet/intel/igb/e1000_i210.c | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_i210.h | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_mac.c | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_mac.h | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_mbx.c | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_mbx.h | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_nvm.c | 22 +--------------- drivers/net/ethernet/intel/igb/e1000_nvm.h | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_phy.c | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_phy.h | 23 +---------------- drivers/net/ethernet/intel/igb/e1000_regs.h | 23 +---------------- drivers/net/ethernet/intel/igb/igb.h | 23 +---------------- drivers/net/ethernet/intel/igb/igb_ethtool.c | 23 +---------------- drivers/net/ethernet/intel/igb/igb_hwmon.c | 23 +---------------- drivers/net/ethernet/intel/igb/igb_main.c | 23 +---------------- drivers/net/ethernet/intel/igb/igb_ptp.c | 19 ++------------ drivers/net/ethernet/intel/igbvf/Makefile | 28 +------------------- drivers/net/ethernet/intel/igbvf/defines.h | 26 +------------------ drivers/net/ethernet/intel/igbvf/ethtool.c | 26 +------------------ drivers/net/ethernet/intel/igbvf/igbvf.h | 26 +------------------ drivers/net/ethernet/intel/igbvf/mbx.c | 26 +------------------ drivers/net/ethernet/intel/igbvf/mbx.h | 26 +------------------ drivers/net/ethernet/intel/igbvf/netdev.c | 26 +------------------ drivers/net/ethernet/intel/igbvf/regs.h | 26 +------------------ drivers/net/ethernet/intel/igbvf/vf.c | 26 +------------------ drivers/net/ethernet/intel/igbvf/vf.h | 26 +------------------ drivers/net/ethernet/intel/ixgb/Makefile | 27 ------------------- 
drivers/net/ethernet/intel/ixgb/ixgb.h | 28 +------------------- drivers/net/ethernet/intel/ixgb/ixgb_ee.c | 29 ++------------------- drivers/net/ethernet/intel/ixgb/ixgb_ee.h | 28 +------------------- drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 29 ++------------------- drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 29 ++------------------- drivers/net/ethernet/intel/ixgb/ixgb_hw.h | 28 +------------------- drivers/net/ethernet/intel/ixgb/ixgb_ids.h | 28 +------------------- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 29 ++------------------- drivers/net/ethernet/intel/ixgb/ixgb_osdep.h | 28 +------------------- drivers/net/ethernet/intel/ixgb/ixgb_param.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/Makefile | 29 +-------------------- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 30 ++-------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 28 ++------------------ drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 28 ++------------------ drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h | 27 +------------------ drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 26 +------------------ drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 28 ++------------------ drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 28 +------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 29 ++------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 24 +---------------- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 26 +++---------------- drivers/net/ethernet/intel/ixgbevf/Makefile | 28 +------------------- drivers/net/ethernet/intel/ixgbevf/defines.h | 26 +------------------ drivers/net/ethernet/intel/ixgbevf/ethtool.c | 27 ++----------------- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 26 +------------------ drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 27 ++----------------- 
drivers/net/ethernet/intel/ixgbevf/mbx.c | 27 ++----------------- drivers/net/ethernet/intel/ixgbevf/mbx.h | 26 +------------------ drivers/net/ethernet/intel/ixgbevf/regs.h | 26 +------------------ drivers/net/ethernet/intel/ixgbevf/vf.c | 27 ++----------------- drivers/net/ethernet/intel/ixgbevf/vf.h | 26 +------------------ 195 files changed, 222 insertions(+), 4624 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 41ad56edfb96..27d5f27163d2 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel PRO/100 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* * e100.c: Intel(R) PRO/100 ethernet driver diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile index c7caadd3c8af..314c52d44b7c 100644 --- a/drivers/net/ethernet/intel/e1000/Makefile +++ b/drivers/net/ethernet/intel/e1000/Makefile @@ -1,31 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel PRO/1000 Linux driver # Copyright(c) 1999 - 2006 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ # # Makefile for the Intel(R) PRO/1000 ethernet driver diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 3a0feea2df54..c40729b2c184 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -1,32 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* Linux PRO/1000 Ethernet Driver main header file */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 3e80ca170dd7..5d365a986bb0 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2006 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* ethtool support for e1000 */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 6e7e923d57bf..48428d6a00be 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* -* - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - - */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* e1000_hw.c * Shared functions for accessing and configuring the MAC diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h index f09c569ec19b..b57a04954ccf 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.h +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* e1000_hw.h * Structures, enums, and macros for the MAC diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index d5eb19b86a0a..2110d5f2da19 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ #include "e1000.h" #include diff --git a/drivers/net/ethernet/intel/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h index ae0559b8b011..e966bb290797 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_osdep.h +++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h @@ -1,32 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* glue for the OS independent part of e1000 * includes register access macros diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c index 345f23927bcc..d3f29ffe1e47 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_param.c +++ b/drivers/net/ethernet/intel/e1000/e1000_param.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 953e99df420c..257bd59bc9c6 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* 80003ES2LAN Gigabit Ethernet Controller (Copper) * 80003ES2LAN Gigabit Ethernet Controller (Serdes) diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h index ee6d1256fda4..aa9d639c6cbb 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_80003ES2LAN_H_ #define _E1000E_80003ES2LAN_H_ diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 924f2c8dfa6c..b9309302c29e 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* 82571EB Gigabit Ethernet Controller * 82571EB Gigabit Ethernet Controller (Copper) diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h index 9a24c645f726..834c238d02db 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.h +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_82571_H_ #define _E1000E_82571_H_ diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile index 24e391a4ac68..44e58b6e7660 100644 --- a/drivers/net/ethernet/intel/e1000e/Makefile +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -1,30 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel PRO/1000 Linux driver -# Copyright(c) 1999 - 2014 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ +# Copyright(c) 1999 - 2018 Intel Corporation. # # Makefile for the Intel(R) PRO/1000 ethernet driver diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 22883015a695..fd550dee4982 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index da88555ba1fd..c760dc72c520 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* Linux PRO/1000 Ethernet Driver main header file */ diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 64dc0c11147f..e084cb734eb1 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ /* ethtool support for e1000 */ diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 21802396bed6..eff75bd8a8f0 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000_HW_H_ #define _E1000_HW_H_ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 1551d6ce5341..cdae0efde8e6 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* 82562G 10/100 Network Connection * 82562G-2 10/100 Network Connection diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 3c4f82c21084..eb09c755fa17 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_ICH8LAN_H_ #define _E1000E_ICH8LAN_H_ diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index b293464a9f27..4abd55d646c5 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h index cb0abf6c76a5..6ab261119801 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.h +++ b/drivers/net/ethernet/intel/e1000e/mac.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_MAC_H_ #define _E1000E_MAC_H_ diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index e027660aeb92..c4c9b20bc51f 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h index 3268f2e58593..d868aad806d4 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.h +++ b/drivers/net/ethernet/intel/e1000e/manage.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_MANAGE_H_ #define _E1000E_MANAGE_H_ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index ec4a9759a6f2..d3fef7fefea8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index 68949bb41b7b..937f9af22d26 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h index 8e082028be7d..6a30dfea4117 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.h +++ b/drivers/net/ethernet/intel/e1000e/nvm.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_NVM_H_ #define _E1000E_NVM_H_ diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 2def33eba9e6..098369fd3e65 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index b8226ed0e338..42233019255a 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index d4180b5e9196..c48777d09523 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_PHY_H_ #define _E1000E_PHY_H_ diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index f941e5085f44..37c76945ad9b 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* PTP 1588 Hardware Clock (PHC) * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 16afc3c2a986..47f5ca793970 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". 
- * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_REGS_H_ #define _E1000E_REGS_H_ diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile index 93277cb99cb7..26a9746ccb14 100644 --- a/drivers/net/ethernet/intel/fm10k/Makefile +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -1,26 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel(R) Ethernet Switch Host Interface Driver -# Copyright(c) 2013 - 2016 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ +# Copyright(c) 2013 - 2018 Intel Corporation. # # Makefile for the Intel(R) Ethernet Switch Host Interface Driver diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index a9cdf763c59d..a903a0ba45e1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_H_ #define _FM10K_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index e303d88720ef..f51a63fca513 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k_common.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h index 2bdb24d2ca9d..4c48fb73b3e7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_COMMON_H_ #define _FM10K_COMMON_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index c4f733452ef2..20768ac7f17e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 43e8d839831f..dca104121c05 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 28b6b4e56487..eeac2b75a195 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 30395f5e5e87..e707d717012f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include "fm10k.h" #include "fm10k_vf.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index c51d61f5f715..3f536541f45f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index c01bf30a0c9e..21021fe4f1c3 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k_common.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index 007e1dfa9b7a..56d1abff04e2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_MBX_H_ #define _FM10K_MBX_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 26e749766337..c879af72bbf5 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k.h" #include diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index c4a2b688b38b..15071e4adb98 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 7ba54c534f8c..8f0a99b6a537 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". 
- * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k_pf.h" #include "fm10k_vf.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index ae81f9a16602..8e814df709d2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_PF_H_ #define _FM10K_PF_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 725ecb7abccd..2a7a40bf2b1c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k_tlv.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h index 5d2ee759507e..160bc5b78f99 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_TLV_H_ #define _FM10K_TLV_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index dd23af11e2c1..3e608e493f9d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_TYPE_H_ #define _FM10K_TYPE_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index f06913630b39..a8519c1f0406 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k_vf.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h index 66a66b73a2f1..787d0d570a28 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_VF_H_ #define _FM10K_VF_H_ diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 75437768a07c..14397e7e9925 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -1,29 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel Ethernet Controller XL710 Family Linux Driver -# Copyright(c) 2013 - 2015 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ +# Copyright(c) 2013 - 2018 Intel Corporation. # # Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index a44139c1de80..f573108faec3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_H_ #define _I40E_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 843fc7781ef8..ddbea79d18e5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_status.h" #include "i40e_type.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 0a8749ee9fd3..edec3df78971 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_ADMINQ_H_ #define _I40E_ADMINQ_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 0244923edeb8..7d888e05f96f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_ADMINQ_CMD_H_ #define _I40E_ADMINQ_CMD_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h index abed0c52e782..cb8689222c8b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_ALLOC_H_ #define _I40E_ALLOC_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index d8ce4999864f..2041757f948c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index 9d464d40bc17..72994baf4941 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_CLIENT_H_ #define _I40E_CLIENT_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index c0a3dae8a2db..6f8fd70d606a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_type.h" #include "i40e_adminq.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 9fec728dc4b9..69e7d4967b1c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_adminq.h" #include "i40e_prototype.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 4f806386cb22..2b748a60a843 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. 
If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_DCB_H_ #define _I40E_DCB_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 502818e3da78..a09d723d5ca0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifdef CONFIG_I40E_DCB #include "i40e.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d494dcaf18d0..94774a2a511f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index ad6a66ccb576..334b05ff685a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_DEVIDS_H_ #define _I40E_DEVIDS_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index df3e60470f8b..ef4d3762bf37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_diag.h" #include "i40e_prototype.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index be8341763475..c3340f320a18 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_DIAG_H_ #define _I40E_DIAG_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index b974482ff630..94dbe2c419ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* ethtool support for i40e */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 6d4b590f851b..e40f3b59e42b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . 
- * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_osdep.h" #include "i40e_register.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 7b5fd33d70ae..1c78de838857 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_HMC_H_ #define _I40E_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index cd40dc487b38..994011c38fb4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include "i40e_osdep.h" #include "i40e_register.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h index 79e1396735d9..c46a2c449e60 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_LAN_HMC_H_ #define _I40E_LAN_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 16229998fb1e..41bad112a907 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index ba9687c03795..51554df9cb9a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_prototype.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 9c3c3b0d3ac4..a07574bff550 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_OSDEP_H_ #define _I40E_OSDEP_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 2ec24188d6e2..3170655cdeb9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_PROTOTYPE_H_ #define _I40E_PROTOTYPE_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 5b47dd1f75a5..47d10ad39c18 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e.h" #include diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index b3e206e49cc2..52e3680c57f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h index 10c86f63dc52..77be0702d07c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_STATUS_H_ #define _I40E_STATUS_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h index 410ba13bcf21..424f02077e2e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_trace.h +++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Modeled on trace-events-sample.h */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 87fb27ab9c24..6959897c789e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 4bf318b8be85..fdd2c55f03a6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_TXRX_H_ #define _I40E_TXRX_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index bfb80092b352..40968a4216a7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . 
- * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 35173cbe80f7..3e7ab9fbeb0c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 57f727bb9e36..bf67d62e2b5f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_VIRTCHNL_PF_H_ #define _I40E_VIRTCHNL_PF_H_ diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile index 1e89c5487676..3c5c6e962280 100644 --- a/drivers/net/ethernet/intel/i40evf/Makefile +++ b/drivers/net/ethernet/intel/i40evf/Makefile @@ -1,29 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver -# Copyright(c) 2013 - 2014 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ +# Copyright(c) 2013 - 2018 Intel Corporation. # ## Makefile for the Intel(R) 40GbE VF driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 6fd677efa9da..c355120dfdfd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include "i40e_status.h" #include "i40e_type.h" diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index a7137c165256..1f264b9b6805 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_ADMINQ_H_ #define _I40E_ADMINQ_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 439e71882049..aa81e87cd471 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_ADMINQ_CMD_H_ #define _I40E_ADMINQ_CMD_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h index 7e0fddd8af36..cb8689222c8b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_ALLOC_H_ #define _I40E_ALLOC_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 67140cdbcd7a..0841b2d1ca6d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include "i40e_type.h" #include "i40e_adminq.h" diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index 352dd3f3eb6a..f300bf271824 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_DEVIDS_H_ #define _I40E_DEVIDS_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h index 7432596164f4..1c78de838857 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_HMC_H_ #define _I40E_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h index ddac0e4908d3..82b00f70a632 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_LAN_HMC_H_ #define _I40E_LAN_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h index 8668ad6c1a65..3ddddb46455b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_OSDEP_H_ #define _I40E_OSDEP_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 72501bd0f1a9..a358f4b9d5aa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_PROTOTYPE_H_ #define _I40E_PROTOTYPE_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index c9c935659758..49e1f57d99cc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h index 0d7993ecb99a..77be0702d07c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_status.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_STATUS_H_ #define _I40E_STATUS_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h index ece01dd12a3c..d7a4e68820a8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Modeled on trace-events-sample.h */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 12bd937861e7..87101f565be1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 5790897eae2e..3b5a63b3236e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_TXRX_H_ #define _I40E_TXRX_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 449de4b0058e..c77cb9f672d8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 3a7a1e77bf39..98b834932dd3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40EVF_H_ #define _I40EVF_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c index da60ce12b33d..a30d093a52d6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c @@ -1,4 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + #include #include diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h index 15a10da5bd4a..fc6592c3de9c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h @@ -1,4 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + #ifndef _I40E_CLIENT_H_ #define _I40E_CLIENT_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index dc4cde274fb8..404595efea78 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* ethtool support for i40evf */ #include "i40evf.h" diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5f71532be7f1..97cda4a8f8e0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40evf.h" #include "i40e_prototype.h" diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 26a59890532f..8bcefb7f7ac0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40evf.h" #include "i40e_prototype.h" diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index c48583e98ac1..394c1e0656b9 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -1,31 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel 82575 PCI-Express Ethernet Linux driver -# Copyright(c) 1999 - 2014 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, see . -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - +# Copyright(c) 1999 - 2018 Intel Corporation. # # Makefile for the Intel(R) 82575 PCI-Express ethernet driver # diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index dd9b6cac220d..b13b42e5a1d9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ /* e1000_82575 * e1000_82576 diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index e53ebe97d709..6ad775b1a4c5 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index d3d1d868e7ba..252440a418dc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index ff835e1e853d..5d87957b2627 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,25 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_HW_H_ #define _E1000_HW_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 6f548247e6d8..c54ebedca6da 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ /* e1000_i210 * e1000_i211 diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 56f015ccb206..5c437fdc49ee 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_I210_H_ #define _E1000_I210_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 298afa0d9159..79ee0a747260 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index 04d80c765aee..6e110f28f922 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_MAC_H_ #define _E1000_MAC_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index ef42f1689b3b..46debd991bfe 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ #include "e1000_mbx.h" diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index 4f0ecd28354d..178e60ec71d4 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_MBX_H_ #define _E1000_MBX_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index e4596f151cd4..09f4dcb09632 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -1,25 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h index dde68cd54a53..091cddf4ada8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.h +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_NVM_H_ #define _E1000_NVM_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 4ec61243da82..2be0e762ec69 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 856d2cda0643..5894e4b1d0a8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_PHY_H_ #define _E1000_PHY_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index e8fa8c6530e0..0ad737d2f289 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 990a7fb32e4e..9643b5b3d444 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ /* Linux PRO/1000 Ethernet Driver main header file */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 6697c273ab59..2d798499d35e 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ /* ethtool support for igb */ diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index bebe43b3a836..3b83747b2700 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #include "igb.h" #include "e1000_82575.h" diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 3ce43207a35f..78574c06635b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 7454b9895a65..9f4d700e09df 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1,21 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ -/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 - * - * Copyright (C) 2011 Richard Cochran - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - */ +/* Copyright (C) 2011 Richard Cochran */ + #include #include #include diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile index efe29dae384a..afd3e36eae75 100644 --- a/drivers/net/ethernet/intel/igbvf/Makefile +++ b/drivers/net/ethernet/intel/igbvf/Makefile @@ -1,31 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel(R) 82576 Virtual Function Linux driver -# Copyright(c) 2009 - 2012 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - +# Copyright(c) 2009 - 2018 Intel Corporation. # # Makefile for the Intel(R) 82576 VF ethernet driver # diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index 04bcfec0641b..4437f832412d 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index ca39e3cccaeb..3ae358b35227 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ /* ethtool support for igbvf */ diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index f5bf248e22eb..eee26a3be90b 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ /* Linux PRO/1000 Ethernet Driver main header file */ diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 9195884096f8..163e5838f7c2 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ #include "mbx.h" diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index 479b062fe9ee..e5b31818d565 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000_MBX_H_ #define _E1000_MBX_H_ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index e2b7502f1953..f818f060e5a7 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
- - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 614e52409f11..625a309a3355 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index bfe8d8297b2e..b8ba3f94c363 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. 
*/ #include "vf.h" diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 193b50026246..c71b0d7dbcee 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ #ifndef _E1000_VF_H_ #define _E1000_VF_H_ diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile index 1b42dd554dd2..2433e9300a33 100644 --- a/drivers/net/ethernet/intel/ixgb/Makefile +++ b/drivers/net/ethernet/intel/ixgb/Makefile @@ -1,32 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel PRO/10GbE Linux driver # Copyright(c) 1999 - 2008 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - # # Makefile for the Intel(R) PRO/10GbE ethernet driver # diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h index 92022841755f..e85271b68410 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #ifndef _IXGB_H_ #define _IXGB_H_ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c index eca216b9b859..129286fc1634 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h index 475297a810fe..3ee0a09e5d0a 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #ifndef _IXGB_EE_H_ #define _IXGB_EE_H_ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index d10a0d242dda..43744bf0fc1c 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. */ /* ethtool support for ixgb */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c index bf9a220f71fb..cbaa933ef30d 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. */ /* ixgb_hw.c * Shared functions for accessing and configuring the adapter diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h index 19f36d87ef61..6064583095da 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #ifndef _IXGB_HW_H_ #define _IXGB_HW_H_ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h index 24e849902d60..9695b8215f01 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. 
*/ #ifndef _IXGB_IDS_H_ #define _IXGB_IDS_H_ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 2353c383f0a7..62f2173bc20e 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h index b1710379192e..7bd54efa698d 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. 
*/ /* glue for the OS independent part of ixgb * includes register access macros diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c index 04a60640ddda..f0cadd532c53 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 4cd96c88cb5d..5414685189ce 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -1,32 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel 10 Gigabit PCI Express Linux driver -# Copyright(c) 1999 - 2013 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - +# Copyright(c) 1999 - 2018 Intel Corporation. 
# # Makefile for the Intel(R) 10GbE PCI Express ethernet driver # diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 8fccca57cd6a..fc534e91c6b2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_H_ #define _IXGBE_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index cb0fe5fedb33..eee277c1bedf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 66a74f4651e8..42f63b943ea0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 633be93f3dbb..8d038837f72b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2b311382167a..4b531e8ae38a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_COMMON_H_ #define _IXGBE_COMMON_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index aaea8282bfd2..d26cea5b43bd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -1,31 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe.h" #include "ixgbe_type.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 73b6362d4327..60cd5863bf5e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _DCB_CONFIG_H_ #define _DCB_CONFIG_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index 085130626330..379ae747cdce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe.h" #include "ixgbe_type.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h index 7edce607f901..fdca41abb44c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _DCB_82598_CONFIG_H_ #define _DCB_82598_CONFIG_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 1eed6811e914..7948849840a5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe.h" #include "ixgbe_type.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index fa030f0abc18..c6f084883cab 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _DCB_82599_CONFIG_H_ #define _DCB_82599_CONFIG_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index b33f3f87e4b1..c00332d2e02a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe.h" #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index ad54080488ee..55fe8114fe99 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -1,30 +1,6 @@ -/******************************************************************************* +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index c0e6ab42e0e1..bdd179c29ea4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ /* ethtool support for ixgbe */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 7a09a40e4472..b5219e024f70 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index cf1919901514..724f5382329f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #ifndef _IXGBE_FCOE_H #define _IXGBE_FCOE_H diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 68af127987bc..41af2b81e960 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -1,29 +1,5 @@ -/******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */ #include "ixgbe.h" #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h index 4f099f516645..9ef7faadda69 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h @@ -1,30 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program. If not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */ #ifndef _IXGBE_IPSEC_H_ #define _IXGBE_IPSEC_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index ed4cbe94c355..893a9206e718 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_sriov.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b6e5cea84949..6652b201df5b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index a0cb84381cd0..5679293e53f7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index c4628b663590..e085b6520dac 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_MBX_H_ #define _IXGBE_MBX_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 72446644f9fa..fcae520647ce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux drive - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_MODEL_H_ #define _IXGBE_MODEL_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 91bde90f9265..919a7af84b42 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index d6a7e77348c5..64e44e01c973 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #ifndef _IXGBE_PHY_H_ #define _IXGBE_PHY_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index f6cc9166082a..b3e0d8bb5cbd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,30 +1,6 @@ -/******************************************************************************* +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ #include "ixgbe.h" #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index bfc4171cd3f9..2649c06d877b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index e30d1f07e891..3ec21923c89c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_SRIOV_H_ #define _IXGBE_SRIOV_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index 24766e125592..204844288c16 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe.h" #include "ixgbe_common.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 2daa81e6e9b2..e8ed37749ab1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_TYPE_H_ #define _IXGBE_TYPE_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index b8c5fd2a2115..de563cfd294d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include #include diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index 182d640e9f7a..e246c0d2a427 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -1,27 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe_type.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 3123267dfba9..959a37599a6b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,26 +1,6 @@ -/******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ + #include "ixgbe_x540.h" #include "ixgbe_type.h" #include "ixgbe_common.h" diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index bb47814cfa90..aba1e6a37a6a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -1,31 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel 82599 Virtual Function driver -# Copyright(c) 1999 - 2012 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. 
-# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - +# Copyright(c) 1999 - 2018 Intel Corporation. # # Makefile for the Intel(R) 82599 VF ethernet driver # diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 71c828842b11..700d8eb2f6f8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBEVF_DEFINES_H_ #define _IXGBEVF_DEFINES_H_ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 8e7d6c6f5c92..e7813d76527c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -1,28 +1,5 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2018 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* ethtool support for ixgbevf */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 447ce1d5e0e3..70c75681495f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2018 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBEVF_H_ #define _IXGBEVF_H_ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index e3d04f226d57..e91c3d1a43ce 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,28 +1,5 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2018 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index 2819abc454c7..6bc1953263b9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -1,28 +1,5 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "mbx.h" #include "ixgbevf.h" diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 5ec947fe3d09..bfd9ae150808 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_MBX_H_ #define _IXGBE_MBX_H_ diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index 278f73980501..68d16ae5b65a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBEVF_REGS_H_ #define _IXGBEVF_REGS_H_ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 38d3a327c1bc..bf0577e819e1 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -1,28 +1,5 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "vf.h" #include "ixgbevf.h" diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 194fbdaa4519..d1e9e306653b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see . - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #ifndef __IXGBE_VF_H__ #define __IXGBE_VF_H__ -- cgit v1.2.3 From c347b9273c26453fa8c9e1acb92f45546d888745 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Apr 2018 14:34:25 -0700 Subject: hv_netvsc: simplify receive side calling arguments The calls up from the napi poll reading the receive ring had many places where an argument was being recreated. I.e the caller already had the value and wasn't passing it, then the callee would use known relationship to determine the same value. Simpler and faster to just pass arguments needed. Also, add const in a couple places where message is being only read. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 58 ++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 32 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index e7308958b7a9..d2ee66c259a7 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -652,16 +652,14 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device, sync_change_bit(index, net_device->send_section_map); } -static void netvsc_send_tx_complete(struct netvsc_device *net_device, - struct vmbus_channel *incoming_channel, - struct hv_device *device, +static void netvsc_send_tx_complete(struct net_device *ndev, + struct netvsc_device *net_device, + struct vmbus_channel *channel, const struct vmpacket_descriptor *desc, int budget) { struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id; - struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *ndev_ctx = netdev_priv(ndev); - struct vmbus_channel *channel = device->channel; u16 q_idx = 0; int queue_sends; @@ -675,7 +673,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, if (send_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, send_index); q_idx = packet->q_idx; - channel = incoming_channel; tx_stats = &net_device->chan_table[q_idx].tx_stats; @@ -705,14 +702,13 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, } } -static void netvsc_send_completion(struct netvsc_device *net_device, +static void netvsc_send_completion(struct net_device *ndev, + struct netvsc_device *net_device, struct vmbus_channel *incoming_channel, - struct hv_device *device, const struct vmpacket_descriptor *desc, int budget) { - struct nvsp_message *nvsp_packet = hv_pkt_data(desc); - struct net_device *ndev = hv_get_drvdata(device); + const struct nvsp_message *nvsp_packet = hv_pkt_data(desc); switch (nvsp_packet->hdr.msg_type) { case NVSP_MSG_TYPE_INIT_COMPLETE: @@ -726,8 +722,8 @@ static void netvsc_send_completion(struct netvsc_device *net_device, break; case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: - netvsc_send_tx_complete(net_device, incoming_channel, - device, desc, budget); + netvsc_send_tx_complete(ndev, net_device, incoming_channel, + desc, budget); break; default: @@ -1092,12 +1088,11 @@ static void enq_receive_complete(struct net_device *ndev, static int netvsc_receive(struct net_device *ndev, struct netvsc_device *net_device, - struct net_device_context *net_device_ctx, - struct hv_device *device, struct vmbus_channel *channel, const struct vmpacket_descriptor *desc, - struct nvsp_message *nvsp) + const struct nvsp_message *nvsp) { + struct net_device_context *net_device_ctx = netdev_priv(ndev); const struct vmtransfer_page_packet_header *vmxferpage_packet = container_of(desc, const struct 
vmtransfer_page_packet_header, d); u16 q_idx = channel->offermsg.offer.sub_channel_index; @@ -1158,13 +1153,12 @@ static int netvsc_receive(struct net_device *ndev, return count; } -static void netvsc_send_table(struct hv_device *hdev, - struct nvsp_message *nvmsg) +static void netvsc_send_table(struct net_device *ndev, + const struct nvsp_message *nvmsg) { - struct net_device *ndev = hv_get_drvdata(hdev); struct net_device_context *net_device_ctx = netdev_priv(ndev); - int i; u32 count, *tab; + int i; count = nvmsg->msg.v5_msg.send_table.count; if (count != VRSS_SEND_TAB_SIZE) { @@ -1179,24 +1173,25 @@ static void netvsc_send_table(struct hv_device *hdev, net_device_ctx->tx_table[i] = tab[i]; } -static void netvsc_send_vf(struct net_device_context *net_device_ctx, - struct nvsp_message *nvmsg) +static void netvsc_send_vf(struct net_device *ndev, + const struct nvsp_message *nvmsg) { + struct net_device_context *net_device_ctx = netdev_priv(ndev); + net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; } -static inline void netvsc_receive_inband(struct hv_device *hdev, - struct net_device_context *net_device_ctx, - struct nvsp_message *nvmsg) +static void netvsc_receive_inband(struct net_device *ndev, + const struct nvsp_message *nvmsg) { switch (nvmsg->hdr.msg_type) { case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: - netvsc_send_table(hdev, nvmsg); + netvsc_send_table(ndev, nvmsg); break; case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: - netvsc_send_vf(net_device_ctx, nvmsg); + netvsc_send_vf(ndev, nvmsg); break; } } @@ -1208,24 +1203,23 @@ static int netvsc_process_raw_pkt(struct hv_device *device, const struct vmpacket_descriptor *desc, int budget) { - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct nvsp_message *nvmsg = hv_pkt_data(desc); + const struct nvsp_message *nvmsg = hv_pkt_data(desc); trace_nvsp_recv(ndev, channel, nvmsg); switch (desc->type) { case VM_PKT_COMP: - netvsc_send_completion(net_device, channel, device, + netvsc_send_completion(ndev, net_device, channel, desc, budget); break; case VM_PKT_DATA_USING_XFER_PAGES: - return netvsc_receive(ndev, net_device, net_device_ctx, - device, channel, desc, nvmsg); + return netvsc_receive(ndev, net_device, channel, + desc, nvmsg); break; case VM_PKT_DATA_INBAND: - netvsc_receive_inband(device, net_device_ctx, nvmsg); + netvsc_receive_inband(ndev, nvmsg); break; default: -- cgit v1.2.3 From 2e8ef77ee0ff1117251a48f79d2d57d65afd0495 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:31 -0400 Subject: bnxt_en: Add TC to hardware QoS queue mapping logic. The current driver maps MQPRIO traffic classes directly 1:1 to the internal hardware queues (TC0 maps to hardware queue 0, etc). This direct mapping requires the internal hardware queues to be reconfigured from lossless to lossy and vice versa when necessary. This involves reconfiguring internal buffer thresholds which is disruptive and not always reliable. Implement a new scheme to map TCs to internal hardware queues by matching up their PFC requirements. This will eliminate the need to reconfigure a hardware queue internal buffers at run time. After remapping, the NIC is closed and opened for the new TC to hardware queues to take effect. This patch only adds the basic mapping logic. Signed-off-by: Michael Chan Signed-off-by: David S. 
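The hv_netvsc refactor earlier in this series follows a simple rule: pass the one object the callee actually needs (the net_device) and derive the rest locally, and const-qualify message pointers that are only read. A minimal sketch of that shape — my_priv and handle_vf_assoc are invented names, not the driver's real types:

	#include <linux/netdevice.h>

	struct my_priv {			/* hypothetical per-device state */
		u32 vf_serial;
	};

	/* Callee takes ndev plus a read-only message; no extra context args. */
	static void handle_vf_assoc(struct net_device *ndev, const u32 *msg)
	{
		struct my_priv *priv = netdev_priv(ndev);	/* derived locally */

		priv->vf_serial = *msg;
	}

Callers then only need the net_device in hand, which is exactly what the netvsc call chain already has at every level.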
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 ++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 65 +++++++++++++++++---------- 3 files changed, 47 insertions(+), 24 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f83769d8047b..bda618d06118 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2383,6 +2383,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring; + u8 qidx; ring = &txr->tx_ring_struct; @@ -2411,7 +2412,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); } - ring->queue_id = bp->q_info[j].queue_id; + qidx = bp->tc_to_qidx[j]; + ring->queue_id = bp->q_info[qidx].queue_id; if (i < bp->tx_nr_rings_xdp) continue; if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) @@ -5309,6 +5311,7 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) for (i = 0; i < bp->max_tc; i++) { bp->q_info[i].queue_id = *qptr++; bp->q_info[i].queue_profile = *qptr++; + bp->tc_to_qidx[i] = i; } qportcfg_exit: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 3d55d3b56865..057f8a2b5d13 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1242,6 +1242,7 @@ struct bnxt { u8 max_tc; u8 max_lltc; /* lossless TCs */ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; + u8 tc_to_qidx[BNXT_MAX_QUEUE]; unsigned int current_interval; #define BNXT_TIMER_INTERVAL HZ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 3c746f2d9ed8..1b72f8a044e1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -21,6 +21,21 @@ #include "bnxt_dcb.h" #ifdef CONFIG_BNXT_DCB +static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id) +{ + int i, j; + + for (i = 0; i < bp->max_tc; i++) { + if (bp->q_info[i].queue_id == queue_id) { + for (j = 0; j < bp->max_tc; j++) { + if (bp->tc_to_qidx[j] == i) + return j; + } + } + } + return -EINVAL; +} + static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) { struct hwrm_queue_pri2cos_cfg_input req = {0}; @@ -33,10 +48,13 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) pri2cos = &req.pri0_cos_queue_id; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + u8 qidx; + req.enables |= cpu_to_le32( QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); - pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id; + qidx = bp->tc_to_qidx[ets->prio_tc[i]]; + pri2cos[i] = bp->q_info[qidx].queue_id; } rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); return rc; @@ -55,17 +73,15 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { u8 *pri2cos = &resp->pri0_cos_queue_id; - int i, j; + int i; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { u8 queue_id = pri2cos[i]; + int tc; - for (j = 0; j < bp->max_tc; j++) { - if (bp->q_info[j].queue_id == queue_id) { - ets->prio_tc[i] = j; - break; - } - } + tc = bnxt_queue_to_tc(bp, queue_id); + if (tc >= 0) + ets->prio_tc[i] = tc; } } mutex_unlock(&bp->hwrm_cmd_lock); @@ -81,13 +97,15 @@ static int 
bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, void *data; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); - data = &req.unused_0; - for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) { + for (i = 0; i < max_tc; i++) { + u8 qidx; + req.enables |= cpu_to_le32( QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i); memset(&cos2bw, 0, sizeof(cos2bw)); - cos2bw.queue_id = bp->q_info[i].queue_id; + qidx = bp->tc_to_qidx[i]; + cos2bw.queue_id = bp->q_info[qidx].queue_id; if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) { cos2bw.tsa = QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP; @@ -103,8 +121,9 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cpu_to_le32((ets->tc_tx_bw[i] * 100) | BW_VALUE_UNIT_PERCENT1_100); } + data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4); memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); - if (i == 0) { + if (qidx == 0) { req.queue_id0 = cos2bw.queue_id; req.unused_0 = 0; } @@ -132,22 +151,22 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { - int j; + int tc; memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4); if (i == 0) cos2bw.queue_id = resp->queue_id0; - for (j = 0; j < bp->max_tc; j++) { - if (bp->q_info[j].queue_id != cos2bw.queue_id) - continue; - if (cos2bw.tsa == - QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) { - ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT; - } else { - ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS; - ets->tc_tx_bw[j] = cos2bw.bw_weight; - } + tc = bnxt_queue_to_tc(bp, cos2bw.queue_id); + if (tc < 0) + continue; + + if (cos2bw.tsa == + QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) { + ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT; + } else { + ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS; + ets->tc_tx_bw[tc] = cos2bw.bw_weight; } } mutex_unlock(&bp->hwrm_cmd_lock); -- cgit v1.2.3 From d31cd579a45c44ede9e56c2f6d33537ba395a49b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:32 -0400 Subject: bnxt_en: Remap TC to hardware queues when configuring PFC. Initially, the MQPRIO TCs are mapped 1:1 directly to the hardware queues. Some of these hardware queues are configured to be lossless. When PFC is enabled on one of more TCs, we now need to remap the TCs that have PFC enabled to the lossless hardware queues. After remapping, we need to close and open the NIC for the new mapping to take effect. We also need to reprogram all ETS parameters. Signed-off-by: Michael Chan Signed-off-by: David S. 
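The queue-mapping logic in these bnxt_en patches reduces to a small indirection table plus a reverse lookup. A self-contained sketch of the idea — the queue_id[] and tc_to_qidx[] arrays are stand-ins for the driver's bp->q_info[] and bp->tc_to_qidx[], not its actual layout:

	#include <stdio.h>

	#define MAX_TC	4

	static unsigned char queue_id[MAX_TC]   = { 10, 11, 12, 13 }; /* fw queue ids */
	static unsigned char tc_to_qidx[MAX_TC] = { 2, 0, 1, 3 };     /* TC -> queue index */

	/* Reverse lookup: which TC currently uses a given firmware queue id? */
	static int queue_to_tc(unsigned char qid)
	{
		int i, j;

		for (i = 0; i < MAX_TC; i++) {
			if (queue_id[i] != qid)
				continue;
			for (j = 0; j < MAX_TC; j++)
				if (tc_to_qidx[j] == i)
					return j;
		}
		return -1;
	}

	int main(void)
	{
		int tc;

		for (tc = 0; tc < MAX_TC; tc++)
			printf("TC%d -> hw queue id %u\n",
			       tc, queue_id[tc_to_qidx[tc]]);
		printf("queue id 11 belongs to TC %d\n", queue_to_tc(11));
		return 0;
	}

Remapping a TC then only means rewriting tc_to_qidx[]; the hardware queues themselves keep their lossless/lossy profiles.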
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 101 +++++++++++++++----------- 1 file changed, 60 insertions(+), 41 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 1b72f8a044e1..d5bc72cecde3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -173,44 +173,59 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) return 0; } -static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask) +static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask) { - struct hwrm_queue_cfg_input req = {0}; - int i; + unsigned long qmap = 0; + int max = bp->max_tc; + int i, j, rc; - if (netif_running(bp->dev)) - bnxt_tx_disable(bp); + /* Assign lossless TCs first */ + for (i = 0, j = 0; i < max; ) { + if (lltc_mask & (1 << i)) { + if (BNXT_LLQ(bp->q_info[j].queue_profile)) { + bp->tc_to_qidx[i] = j; + __set_bit(j, &qmap); + i++; + } + j++; + continue; + } + i++; + } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1); - req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR); - req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE); + for (i = 0, j = 0; i < max; i++) { + if (lltc_mask & (1 << i)) + continue; + j = find_next_zero_bit(&qmap, max, j); + bp->tc_to_qidx[i] = j; + __set_bit(j, &qmap); + j++; + } - /* Configure lossless queues to lossy first */ - req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY; - for (i = 0; i < bp->max_tc; i++) { - if (BNXT_LLQ(bp->q_info[i].queue_profile)) { - req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); - hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); - bp->q_info[i].queue_profile = - QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY; + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + if (rc) { + netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc); + return rc; } } - - /* Now configure desired queues to lossless */ - req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS; - for (i = 0; i < bp->max_tc; i++) { - if (lltc_mask & (1 << i)) { - req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); - hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); - bp->q_info[i].queue_profile = - QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS; + if (bp->ieee_ets) { + int tc = netdev_get_num_tc(bp->dev); + + if (!tc) + tc = 1; + rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc); + if (rc) { + netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc); + return rc; + } + rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets); + if (rc) { + netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc); + return rc; } } - if (netif_running(bp->dev)) - bnxt_tx_enable(bp); - return 0; } @@ -220,7 +235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) struct ieee_ets *my_ets = bp->ieee_ets; unsigned int tc_mask = 0, pri_mask = 0; u8 i, pri, lltc_count = 0; - bool need_q_recfg = false; + bool need_q_remap = false; int rc; if (!my_ets) @@ -240,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) if (lltc_count > bp->max_lltc) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1); - req.flags = cpu_to_le32(pri_mask); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - return rc; - for (i = 0; i < bp->max_tc; i++) { if (tc_mask & (1 << i)) { - if 
(!BNXT_LLQ(bp->q_info[i].queue_profile)) - need_q_recfg = true; + u8 qidx = bp->tc_to_qidx[i]; + + if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) { + need_q_remap = true; + break; + } } } - if (need_q_recfg) - rc = bnxt_hwrm_queue_cfg(bp, tc_mask); + if (need_q_remap) + rc = bnxt_queue_remap(bp, tc_mask); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1); + req.flags = cpu_to_le32(pri_mask); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return rc; return rc; } -- cgit v1.2.3 From 59895f596b13b4b09f739bf8470a5028a5ff2b9a Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:33 -0400 Subject: bnxt_en: Check the lengths of encapsulated firmware responses. Firmware messages that are forwarded from PF to VFs are encapsulated. The size of these encapsulated messages must not exceed the maximum defined message size. Add appropriate checks to avoid oversize messages. Firmware messages may be expanded in future specs and this will provide some guardrails to avoid data corruption. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 9 +++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 12 ++++++++++++ 2 files changed, 21 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index f952963d594e..18ee471c0002 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -809,6 +809,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, struct hwrm_fwd_resp_input req = {0}; struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) + return -EINVAL; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); /* Set the new target id */ @@ -845,6 +848,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, struct hwrm_reject_fwd_resp_input req = {0}; struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size)) + return -EINVAL; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); @@ -877,6 +883,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, struct hwrm_exec_fwd_resp_input req = {0}; struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) + return -EINVAL; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index d10f6f6c7860..6f6d85010ecf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -11,6 +11,18 @@ #ifndef BNXT_SRIOV_H #define BNXT_SRIOV_H +#define BNXT_FWD_RESP_SIZE_ERR(n) \ + ((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \ + sizeof(struct hwrm_fwd_resp_input)) + +#define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \ + ((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\ + offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id)) + +#define BNXT_REJ_FWD_RESP_SIZE_ERR(n) \ + ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\ + offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id)) + int 
bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *); int bnxt_set_vf_mac(struct net_device *, int, u8 *); int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); -- cgit v1.2.3 From ca2c39e2ec04e78ca6eb5162621cb9a5b897ca16 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:34 -0400 Subject: bnxt_en: Do not set firmware time from VF driver on older firmware. Older firmware will reject this call and cause an error message to be printed by the VF driver. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index bda618d06118..aff4b4eed1cb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5379,7 +5379,8 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp) struct tm tm; time64_t now = ktime_get_real_seconds(); - if (bp->hwrm_spec_code < 0x10400) + if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || + bp->hwrm_spec_code < 0x10400) return -EOPNOTSUPP; time64_to_tm(now, 0, &tm); -- cgit v1.2.3 From 2727c888f2f8bef071e9a07d6e2f018840d0a834 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:35 -0400 Subject: bnxt_en: Simplify ring alloc/free error messages. Replace switch statements printing different messages for every ring type with a common message. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 43 +++++-------------------------- 1 file changed, 6 insertions(+), 37 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index aff4b4eed1cb..b83c2ac17620 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4336,26 +4336,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, mutex_unlock(&bp->hwrm_cmd_lock); if (rc || err) { - switch (ring_type) { - case RING_FREE_REQ_RING_TYPE_L2_CMPL: - netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", - rc, err); - return -1; - - case RING_FREE_REQ_RING_TYPE_RX: - netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", - rc, err); - return -1; - - case RING_FREE_REQ_RING_TYPE_TX: - netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n", - rc, err); - return -1; - - default: - netdev_err(bp->dev, "Invalid ring\n"); - return -1; - } + netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", + ring_type, rc, err); + return -EIO; } ring->fw_ring_id = ring_id; return rc; @@ -4479,23 +4462,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, mutex_unlock(&bp->hwrm_cmd_lock); if (rc || error_code) { - switch (ring_type) { - case RING_FREE_REQ_RING_TYPE_L2_CMPL: - netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", - rc); - return rc; - case RING_FREE_REQ_RING_TYPE_RX: - netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", - rc); - return rc; - case RING_FREE_REQ_RING_TYPE_TX: - netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n", - rc); - return rc; - default: - netdev_err(bp->dev, "Invalid ring\n"); - return -1; - } + netdev_err(bp->dev, "hwrm_ring_free type %d failed. 
rc:%x err:%x\n", + ring_type, rc, error_code); + return -EIO; } return 0; } -- cgit v1.2.3 From 20c1d28e106c0b526ae015fcac8e1e254bff091c Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Thu, 26 Apr 2018 17:44:36 -0400 Subject: bnxt_en: Display function level rx/tx_discard_pkts via ethtool Add counters to display sum of rx/tx_discard_pkts of all rings as function level statistics via ethtool. Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 33 +++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8ba14ae00e8f..0ea8466d531b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -140,6 +140,19 @@ reset_coalesce: #define BNXT_RX_STATS_EXT_ENTRY(counter) \ { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } +enum { + RX_TOTAL_DISCARDS, + TX_TOTAL_DISCARDS, +}; + +static struct { + u64 counter; + char string[ETH_GSTRING_LEN]; +} bnxt_sw_func_stats[] = { + {0, "rx_total_discard_pkts"}, + {0, "tx_total_discard_pkts"}, +}; + static const struct { long offset; char string[ETH_GSTRING_LEN]; @@ -237,6 +250,7 @@ static const struct { BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), }; +#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) #define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr) @@ -244,6 +258,8 @@ static int bnxt_get_num_stats(struct bnxt *bp) { int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + num_stats += BNXT_NUM_SW_FUNC_STATS; + if (bp->flags & BNXT_FLAG_PORT_STATS) num_stats += BNXT_NUM_PORT_STATS; @@ -279,6 +295,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, if (!bp->bnapi) return; + for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) + bnxt_sw_func_stats[i].counter = 0; + for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -288,7 +307,16 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (k = 0; k < stat_fields; j++, k++) buf[j] = le64_to_cpu(hw_stats[k]); buf[j++] = cpr->rx_l4_csum_errors; + + bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += + le64_to_cpu(cpr->hw_stats->rx_discard_pkts); + bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += + le64_to_cpu(cpr->hw_stats->tx_discard_pkts); } + + for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) + buf[j] = bnxt_sw_func_stats[i].counter; + if (bp->flags & BNXT_FLAG_PORT_STATS) { __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; @@ -359,6 +387,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) sprintf(buf, "[%d]: rx_l4_csum_errors", i); buf += ETH_GSTRING_LEN; } + for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { + strcpy(buf, bnxt_sw_func_stats[i].string); + buf += ETH_GSTRING_LEN; + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { strcpy(buf, bnxt_port_stats_arr[i].string); -- cgit v1.2.3 From 4cebbaca12514986039b2ac7d30e36ecd2222f64 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:37 -0400 Subject: bnxt_en: Do not allow VF to read EEPROM. Firmware does not allow the operation and would return failure, causing a warning in dmesg. So check for VF and disallow it in the driver. Signed-off-by: Michael Chan Signed-off-by: David S. 
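The per-function discard counters added in the ethtool patch above follow a common pattern: keep a parallel array of {value, name} entries, zero it, accumulate across all rings, then copy the totals into the stats buffer after the per-ring entries. A stripped-down sketch of that pattern — sw_stats and the ring_*_discards arrays are invented here, not the driver's structures:

	#include <stdio.h>

	#define NR_RINGS	3
	#define NAME_LEN	32

	enum { RX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS, NR_SW_STATS };

	static struct {
		unsigned long long counter;
		char name[NAME_LEN];
	} sw_stats[NR_SW_STATS] = {
		{ 0, "rx_total_discard_pkts" },
		{ 0, "tx_total_discard_pkts" },
	};

	/* Per-ring counters; in the driver these come from DMA'ed stats blocks. */
	static const unsigned long long ring_rx_discards[NR_RINGS] = { 1, 0, 4 };
	static const unsigned long long ring_tx_discards[NR_RINGS] = { 0, 2, 0 };

	int main(void)
	{
		int i;

		for (i = 0; i < NR_SW_STATS; i++)
			sw_stats[i].counter = 0;

		for (i = 0; i < NR_RINGS; i++) {
			sw_stats[RX_TOTAL_DISCARDS].counter += ring_rx_discards[i];
			sw_stats[TX_TOTAL_DISCARDS].counter += ring_tx_discards[i];
		}

		for (i = 0; i < NR_SW_STATS; i++)
			printf("%s: %llu\n", sw_stats[i].name, sw_stats[i].counter);
		return 0;
	}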
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 0ea8466d531b..a699ca5493ef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1818,6 +1818,11 @@ static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) static int bnxt_get_eeprom_len(struct net_device *dev) { + struct bnxt *bp = netdev_priv(dev); + + if (BNXT_VF(bp)) + return 0; + /* The -1 return value allows the entire 32-bit range of offsets to be * passed via the ethtool command-line utility. */ -- cgit v1.2.3 From 05abe4ddf0010e15419f5a6758b5bf44b7790982 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Thu, 26 Apr 2018 17:44:38 -0400 Subject: bnxt_en: Increase RING_IDLE minimum threshold to 50 This keeps the RING_IDLE flag set in hardware for higher coalesce settings by default and improves latency. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b83c2ac17620..a221a10c4c29 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7702,7 +7702,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) coal->coal_bufs = 30; coal->coal_ticks_irq = 1; coal->coal_bufs_irq = 2; - coal->idle_thresh = 25; + coal->idle_thresh = 50; coal->bufs_per_record = 2; coal->budget = 64; /* NAPI budget */ -- cgit v1.2.3 From 9751e8e714872aa650b030e52a9fafbb694a3714 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Thu, 26 Apr 2018 17:44:39 -0400 Subject: bnxt_en: reduce timeout on initial HWRM calls Testing with DIM enabled on older kernels indicated that firmware calls were slower than expected. More detailed analysis indicated that the default 25us delay was higher than necessary. Reducing the time spent in usleep_range() for the first several calls would reduce the overall latency of firmware calls on newer Intel processors. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 26 +++++++++++++++++++++++--- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 6 ++++++ 2 files changed, 29 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a221a10c4c29..ff9a5cd77138 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3495,15 +3495,29 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (!timeout) timeout = DFLT_HWRM_CMD_TIMEOUT; + /* convert timeout to usec */ + timeout *= 1000; i = 0; - tmo_count = timeout * 40; + /* Short timeout for the first few iterations: + * number of loops = number of loops for short timeout + + * number of loops for standard timeout.
+ */ + tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; + timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; + tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; if (intr_process) { /* Wait until hwrm response cmpl interrupt is processed */ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && i++ < tmo_count) { - usleep_range(25, 40); + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + else + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); } if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { @@ -3521,7 +3535,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, HWRM_RESP_LEN_SFT; if (len) break; - usleep_range(25, 40); + /* on first few passes, just barely sleep */ + if (i < DFLT_HWRM_CMD_TIMEOUT) + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + else + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); } if (i >= tmo_count) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 057f8a2b5d13..7fa4a458edd0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -532,6 +532,12 @@ struct rx_tpa_end_cmp_ext { #define BNXT_HWRM_REQ_MAX_SIZE 128 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ BNXT_HWRM_REQ_MAX_SIZE) +#define HWRM_SHORT_MIN_TIMEOUT 3 +#define HWRM_SHORT_MAX_TIMEOUT 10 +#define HWRM_SHORT_TIMEOUT_COUNTER 5 + +#define HWRM_MIN_TIMEOUT 25 +#define HWRM_MAX_TIMEOUT 40 #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 -- cgit v1.2.3 From cabfb09d87bd7980cb4e39bd2ce679a788eb7e7a Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Thu, 26 Apr 2018 17:44:40 -0400 Subject: bnxt_en: add debugfs support for DIM This adds debugfs support for bnxt_en with the purpose of allowing users to examine the current DIM profile in use for each receive queue. This was instrumental in debugging issues found with DIM and ensuring that the profiles we expect to use are the profiles being used. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/Makefile | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 + drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 124 ++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h | 23 ++++ 5 files changed, 156 insertions(+) create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 7c560d545c03..5a779b19d149 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o +bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ff9a5cd77138..a45e692cd0ca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -62,6 +62,7 @@ #include "bnxt_vfr.h" #include "bnxt_tc.h" #include "bnxt_devlink.h" +#include "bnxt_debugfs.h" #define BNXT_TX_TIMEOUT (5 * HZ) @@ -6870,6 +6871,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } bnxt_enable_napi(bp); + bnxt_debug_dev_init(bp); rc = bnxt_init_nic(bp, irq_re_init); if (rc) { @@ -6902,6 +6904,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) return 0; open_err: + bnxt_debug_dev_exit(bp); bnxt_disable_napi(bp); bnxt_del_napi(bp); @@ -6995,6 +6998,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ + bnxt_debug_dev_exit(bp); bnxt_disable_napi(bp); del_timer_sync(&bp->timer); bnxt_free_skbs(bp); @@ -9071,6 +9075,7 @@ static struct pci_driver bnxt_pci_driver = { static int __init bnxt_init(void) { + bnxt_debug_init(); return pci_register_driver(&bnxt_pci_driver); } @@ -9079,6 +9084,7 @@ static void __exit bnxt_exit(void) pci_unregister_driver(&bnxt_pci_driver); if (bnxt_pf_wq) destroy_workqueue(bnxt_pf_wq); + bnxt_debug_exit(); } module_init(bnxt_init); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7fa4a458edd0..8df1d8b9d2e3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1391,6 +1391,8 @@ struct bnxt { u16 *cfa_code_map; /* cfa_code -> vf_idx map */ u8 switch_id[8]; struct bnxt_tc_info *tc_info; + struct dentry *debugfs_pdev; + struct dentry *debugfs_dim; }; #define BNXT_RX_STATS_OFFSET(counter) \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c new file mode 100644 index 000000000000..94e208e9789f --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c @@ -0,0 +1,124 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include "bnxt_hsi.h" +#include +#include "bnxt.h" +#include "bnxt_debugfs.h" + +static struct dentry *bnxt_debug_mnt; + +static ssize_t debugfs_dim_read(struct file *filep, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct net_dim *dim = filep->private_data; + int len; + char *buf; + + if (*ppos) + return 0; + if (!dim) + return -ENODEV; + buf = kasprintf(GFP_KERNEL, + "state = %d\n" \ + "profile_ix = %d\n" \ + "mode = %d\n" \ + "tune_state = %d\n" \ + "steps_right = %d\n" \ + "steps_left = %d\n" \ + "tired = %d\n", + dim->state, + dim->profile_ix, + dim->mode, + dim->tune_state, + dim->steps_right, + dim->steps_left, + dim->tired); + if (!buf) + return -ENOMEM; + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + return len; +} + +static const struct file_operations debugfs_dim_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_dim_read, +}; + +static struct dentry *debugfs_dim_ring_init(struct net_dim *dim, int ring_idx, + struct dentry *dd) +{ + static char qname[16]; + + snprintf(qname, 10, "%d", ring_idx); + return debugfs_create_file(qname, 0600, dd, + dim, &debugfs_dim_fops); +} + +void bnxt_debug_dev_init(struct bnxt *bp) +{ + const char *pname = pci_name(bp->pdev); + struct dentry *pdevf; + int i; + + bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt); + if (bp->debugfs_pdev) { + pdevf = debugfs_create_dir("dim", bp->debugfs_pdev); + if (!pdevf) { + pr_err("failed to create debugfs entry %s/dim\n", + pname); + return; + } + bp->debugfs_dim = pdevf; + /* create files for each rx ring */ + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + + if (cpr && bp->bnapi[i]->rx_ring) { + pdevf = debugfs_dim_ring_init(&cpr->dim, i, + bp->debugfs_dim); + if (!pdevf) + pr_err("failed to create debugfs entry %s/dim/%d\n", + pname, i); + } + } + } else { + pr_err("failed to create debugfs entry %s\n", pname); + } +} + +void bnxt_debug_dev_exit(struct bnxt *bp) +{ + if (bp) { + debugfs_remove_recursive(bp->debugfs_pdev); + bp->debugfs_pdev = NULL; + } +} + +void bnxt_debug_init(void) +{ + bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL); + if (!bnxt_debug_mnt) + pr_err("failed to init bnxt_en debugfs\n"); +} + +void bnxt_debug_exit(void) +{ + debugfs_remove_recursive(bnxt_debug_mnt); +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h new file mode 100644 index 000000000000..d0bb4887acd0 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h @@ -0,0 +1,23 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include "bnxt_hsi.h" +#include "bnxt.h" + +#ifdef CONFIG_DEBUG_FS +void bnxt_debug_init(void); +void bnxt_debug_exit(void); +void bnxt_debug_dev_init(struct bnxt *bp); +void bnxt_debug_dev_exit(struct bnxt *bp); +#else +static inline void bnxt_debug_init(void) {} +static inline void bnxt_debug_exit(void) {} +static inline void bnxt_debug_dev_init(struct bnxt *bp) {} +static inline void bnxt_debug_dev_exit(struct bnxt *bp) {} +#endif -- cgit v1.2.3 From d8c09f19accb89fc08b246339abb005455e4c846 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:41 -0400 Subject: bnxt_en: Reserve rings in bnxt_set_channels() if device is down. The current code does not reserve rings during ethtool -L when the device is down. The rings will be reserved when the device is later opened. Change it to reserve rings during ethtool -L when the device is down. This provides a better guarantee that the device open will be successful when the rings are reserved ahead of time. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index a699ca5493ef..ad98b78f5aa1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -584,6 +584,8 @@ static int bnxt_set_channels(struct net_device *dev, * to renable */ } + } else { + rc = bnxt_reserve_rings(bp); } return rc; -- cgit v1.2.3 From 2773dfb201e18722265c38dacdea6ecadf933064 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:42 -0400 Subject: bnxt_en: Don't reserve rings on VF when min rings were not provisioned by PF. When rings are more limited and the PF has not provisioned minimum guaranteed rings to the VF, do not reserve rings during driver probe. Wait till device open before reserving rings when they will be used. Device open will succeed if some minimum rings can be successfully reserved and allocated. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a45e692cd0ca..0884e4902668 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5952,6 +5952,9 @@ static int bnxt_init_msix(struct bnxt *bp) if (total_vecs > max) total_vecs = max; + if (!total_vecs) + return 0; + msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); if (!msix_ent) return -ENOMEM; @@ -7276,6 +7279,25 @@ skip_uc: return rc; } +static bool bnxt_can_reserve_rings(struct bnxt *bp) +{ +#ifdef CONFIG_BNXT_SRIOV + if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) { + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + /* No minimum rings were provisioned by the PF. Don't + * reserve rings by default when device is down. 
+ */ + if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) + return true; + + if (!netif_running(bp->dev)) + return false; + } +#endif + return true; +} + /* If the chip and firmware supports RFS */ static bool bnxt_rfs_supported(struct bnxt *bp) { @@ -7292,7 +7314,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp) #ifdef CONFIG_RFS_ACCEL int vnics, max_vnics, max_rss_ctxs; - if (!(bp->flags & BNXT_FLAG_MSIX_CAP)) + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) return false; vnics = 1 + bp->rx_nr_rings; @@ -8526,6 +8548,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) { int dflt_rings, max_rx_rings, max_tx_rings, rc; + if (!bnxt_can_reserve_rings(bp)) + return 0; + if (sh) bp->flags |= BNXT_FLAG_SHARED_RINGS; dflt_rings = netif_get_num_default_rss_queues(); -- cgit v1.2.3 From 86c3380d9b1e2a3fcc87d34cea12991b81032b9f Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:43 -0400 Subject: bnxt_en: Reserve RSS and L2 contexts for VF. For completeness and correctness, the VF driver needs to reserve these RSS and L2 contexts. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++++ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 10 +++++----- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 5 +++++ 3 files changed, 14 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0884e4902668..fee1c0df146f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4713,6 +4713,10 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, cp_rings, vnics); + req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | + FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS); + req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); + req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 18ee471c0002..cc21d874eb6e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -462,13 +462,13 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) vf_vnics = hw_resc->max_vnics - bp->nr_vnics; vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); - req.min_rsscos_ctx = cpu_to_le16(1); - req.max_rsscos_ctx = cpu_to_le16(1); + req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); + req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) { req.min_cmpl_rings = cpu_to_le16(1); req.min_tx_rings = cpu_to_le16(1); req.min_rx_rings = cpu_to_le16(1); - req.min_l2_ctxs = cpu_to_le16(1); + req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX); req.min_vnics = cpu_to_le16(1); req.min_stat_ctx = cpu_to_le16(1); req.min_hw_ring_grps = cpu_to_le16(1); @@ -483,7 +483,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); req.min_tx_rings = cpu_to_le16(vf_tx_rings); req.min_rx_rings = cpu_to_le16(vf_rx_rings); - req.min_l2_ctxs = cpu_to_le16(4); + req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); req.min_vnics = cpu_to_le16(vf_vnics); req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); req.min_hw_ring_grps = 
cpu_to_le16(vf_ring_grps); @@ -491,7 +491,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); req.max_tx_rings = cpu_to_le16(vf_tx_rings); req.max_rx_rings = cpu_to_le16(vf_rx_rings); - req.max_l2_ctxs = cpu_to_le16(4); + req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); req.max_vnics = cpu_to_le16(vf_vnics); req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index 6f6d85010ecf..e9b20cd19881 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -23,6 +23,11 @@ ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\ offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id)) +#define BNXT_VF_MIN_RSS_CTX 1 +#define BNXT_VF_MAX_RSS_CTX 1 +#define BNXT_VF_MIN_L2_CTX 1 +#define BNXT_VF_MAX_L2_CTX 4 + int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *); int bnxt_set_vf_mac(struct net_device *, int, u8 *); int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); -- cgit v1.2.3 From 47558acd56a74c1ac598093930a5559270bf8c09 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Apr 2018 17:44:44 -0400 Subject: bnxt_en: Reserve rings at driver open if none was reserved at probe time. Add logic to reserve default rings at driver open time if none was reserved during probe time. This will happen when the PF driver did not provision minimum rings to the VF, due to more limited resources. Driver open will only succeed if some minimum rings can be reserved. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index fee1c0df146f..efe5c7203f60 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6844,6 +6844,8 @@ static void bnxt_preset_reg_win(struct bnxt *bp) } } +static int bnxt_init_dflt_ring_mode(struct bnxt *bp); + static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -6851,6 +6853,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_preset_reg_win(bp); netif_carrier_off(bp->dev); if (irq_re_init) { + /* Reserve rings now if none were reserved at driver probe. 
*/ + rc = bnxt_init_dflt_ring_mode(bp); + if (rc) { + netdev_err(bp->dev, "Failed to reserve default rings at open\n"); + return rc; + } rc = bnxt_reserve_rings(bp); if (rc) return rc; @@ -8600,6 +8608,29 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) return rc; } +static int bnxt_init_dflt_ring_mode(struct bnxt *bp) +{ + int rc; + + if (bp->tx_nr_rings) + return 0; + + rc = bnxt_set_dflt_rings(bp, true); + if (rc) { + netdev_err(bp->dev, "Not enough rings available.\n"); + return rc; + } + rc = bnxt_init_int_mode(bp); + if (rc) + return rc; + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { + bp->flags |= BNXT_FLAG_RFS; + bp->dev->features |= NETIF_F_NTUPLE; + } + return 0; +} + int bnxt_restore_pf_fw_resources(struct bnxt *bp) { int rc; -- cgit v1.2.3 From 6c3442f5f85827e40e314a74a24b1428e78748c8 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Fri, 27 Apr 2018 16:18:58 +0800 Subject: drivers: net: replace UINT64_MAX with U64_MAX U64_MAX is well defined now while the UINT64_MAX is not, so we fall back to drivers' own definition as below: #ifndef UINT64_MAX #define UINT64_MAX (u64)(~((u64)0)) #endif I believe this is in one phy driver then copied and pasted to other phy drivers. Replace the UINT64_MAX with U64_MAX to clean up the source code. Signed-off-by: Jisheng Zhang Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 6 +++--- drivers/net/dsa/mv88e6xxx/chip.h | 4 ---- drivers/net/phy/bcm-phy-lib.c | 6 +----- drivers/net/phy/marvell.c | 5 +---- drivers/net/phy/micrel.c | 5 +---- drivers/net/phy/smsc.c | 5 +---- 6 files changed, 7 insertions(+), 24 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8f92ccc0dd54..a13a7005ffa0 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -665,13 +665,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, case STATS_TYPE_PORT: err = mv88e6xxx_port_read(chip, port, s->reg, ®); if (err) - return UINT64_MAX; + return U64_MAX; low = reg; if (s->size == 4) { err = mv88e6xxx_port_read(chip, port, s->reg + 1, ®); if (err) - return UINT64_MAX; + return U64_MAX; high = reg; } break; @@ -685,7 +685,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, mv88e6xxx_g1_stats_read(chip, reg + 1, &high); break; default: - return UINT64_MAX; + return U64_MAX; } value = (((u64)high) << 16) | low; return value; diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 80490f66bc06..4163c8099d0b 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -21,10 +21,6 @@ #include #include -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif - #define SMI_CMD 0x00 #define SMI_CMD_BUSY BIT(15) #define SMI_CMD_CLAUSE_22 BIT(12) diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 5ad130c3da43..0876aec7328c 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -346,10 +346,6 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data) } EXPORT_SYMBOL_GPL(bcm_phy_get_strings); -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif - /* Caller is supposed to provide appropriate storage for the library code to * access the shadow copy */ @@ -362,7 +358,7 @@ static u64 bcm_phy_get_stat(struct phy_device *phydev, u64 *shadow, val = phy_read(phydev, stat.reg); if (val < 0) { - ret = UINT64_MAX; + ret = 
U64_MAX; } else { val >>= stat.shift; val = val & ((1 << stat.bits) - 1); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 25e2a099b71c..b8f57e9b9379 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1482,9 +1482,6 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data) } } -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif static u64 marvell_get_stat(struct phy_device *phydev, int i) { struct marvell_hw_stat stat = marvell_hw_stats[i]; @@ -1494,7 +1491,7 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) val = phy_read_paged(phydev, stat.page, stat.reg); if (val < 0) { - ret = UINT64_MAX; + ret = U64_MAX; } else { val = val & ((1 << stat.bits) - 1); priv->stats[i] += val; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index f41b224a9cdb..de31c5170a5b 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -650,9 +650,6 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data) } } -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif static u64 kszphy_get_stat(struct phy_device *phydev, int i) { struct kszphy_hw_stat stat = kszphy_hw_stats[i]; @@ -662,7 +659,7 @@ static u64 kszphy_get_stat(struct phy_device *phydev, int i) val = phy_read(phydev, stat.reg); if (val < 0) { - ret = UINT64_MAX; + ret = U64_MAX; } else { val = val & ((1 << stat.bits) - 1); priv->stats[i] += val; diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index be399d645224..c328208388da 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -168,9 +168,6 @@ static void smsc_get_strings(struct phy_device *phydev, u8 *data) } } -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif static u64 smsc_get_stat(struct phy_device *phydev, int i) { struct smsc_hw_stat stat = smsc_hw_stats[i]; @@ -179,7 +176,7 @@ static u64 smsc_get_stat(struct phy_device *phydev, int i) val = phy_read(phydev, stat.reg); if (val < 0) - ret = UINT64_MAX; + ret = U64_MAX; else ret = val; -- cgit v1.2.3 From 9e8d438e8ba43a38def2890a65a26917f2233a83 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 27 Apr 2018 12:41:49 -0700 Subject: net: phy: Fix modular PHYLIB build After commit c59530d0d5dc ("net: Move PHY statistics code into PHY library helpers") we made net/core/ethtool.c reference symbols which are part of the library which can be modular. David introduced a temporary fix with 1ecd6e8ad996 ("phy: Temporary build fix after phylib changes.") which would prevent such modularity. This is not desireable of course, so instead, just inline the functions into include/linux/phy.h to keep both options available. Fixes: c59530d0d5dc ("net: Move PHY statistics code into PHY library helpers") Fixes: 1ecd6e8ad996 ("phy: Temporary build fix after phylib changes.") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/Kconfig | 3 ++- drivers/net/phy/phy.c | 48 ----------------------------------------------- include/linux/phy.h | 50 +++++++++++++++++++++++++++++++++++++------------ 3 files changed, 40 insertions(+), 61 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 7c5e8c1e9370..edb8b9ab827f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -9,6 +9,7 @@ menuconfig MDIO_DEVICE config MDIO_BUS tristate + default m if PHYLIB=m default MDIO_DEVICE help This internal symbol is used for link time dependencies and it @@ -170,7 +171,7 @@ config PHYLINK autonegotiation modes. menuconfig PHYLIB - bool "PHY Device support and infrastructure" + tristate "PHY Device support and infrastructure" depends on NETDEVICES select MDIO_DEVICE help diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a98ed12c0009..05c1e8ef15e6 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1277,51 +1277,3 @@ int phy_ethtool_nway_reset(struct net_device *ndev) return phy_restart_aneg(phydev); } EXPORT_SYMBOL(phy_ethtool_nway_reset); - -int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) -{ - if (!phydev->drv) - return -EIO; - - mutex_lock(&phydev->lock); - phydev->drv->get_strings(phydev, data); - mutex_unlock(&phydev->lock); - - return 0; -} -EXPORT_SYMBOL(phy_ethtool_get_strings); - -int phy_ethtool_get_sset_count(struct phy_device *phydev) -{ - int ret; - - if (!phydev->drv) - return -EIO; - - if (phydev->drv->get_sset_count && - phydev->drv->get_strings && - phydev->drv->get_stats) { - mutex_lock(&phydev->lock); - ret = phydev->drv->get_sset_count(phydev); - mutex_unlock(&phydev->lock); - - return ret; - } - - return -EOPNOTSUPP; -} -EXPORT_SYMBOL(phy_ethtool_get_sset_count); - -int phy_ethtool_get_stats(struct phy_device *phydev, - struct ethtool_stats *stats, u64 *data) -{ - if (!phydev->drv) - return -EIO; - - mutex_lock(&phydev->lock); - phydev->drv->get_stats(phydev, stats, data); - mutex_unlock(&phydev->lock); - - return 0; -} -EXPORT_SYMBOL(phy_ethtool_get_stats); diff --git a/include/linux/phy.h b/include/linux/phy.h index 6ca81395c545..073235e70442 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1066,27 +1066,53 @@ int phy_ethtool_nway_reset(struct net_device *ndev); #if IS_ENABLED(CONFIG_PHYLIB) int __init mdio_bus_init(void); void mdio_bus_exit(void); -int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); -int phy_ethtool_get_sset_count(struct phy_device *phydev); -int phy_ethtool_get_stats(struct phy_device *phydev, - struct ethtool_stats *stats, u64 *data); -#else -int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) +#endif + +/* Inline function for use within net/core/ethtool.c (built-in) */ +static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) { - return -EOPNOTSUPP; + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_strings(phydev, data); + mutex_unlock(&phydev->lock); + + return 0; } -int phy_ethtool_get_sset_count(struct phy_device *phydev) +static inline int phy_ethtool_get_sset_count(struct phy_device *phydev) { + int ret; + + if (!phydev->drv) + return -EIO; + + if (phydev->drv->get_sset_count && + phydev->drv->get_strings && + phydev->drv->get_stats) { + mutex_lock(&phydev->lock); + ret = phydev->drv->get_sset_count(phydev); + mutex_unlock(&phydev->lock); + + return ret; + } + return -EOPNOTSUPP; } -int phy_ethtool_get_stats(struct phy_device *phydev, - struct 
ethtool_stats *stats, u64 *data) +static inline int phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) { - return -EOPNOTSUPP; + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_stats(phydev, stats, data); + mutex_unlock(&phydev->lock); + + return 0; } -#endif extern struct bus_type mdio_bus_type; -- cgit v1.2.3 From b28f872dc48a4d55cba9d51cf3bac03ca32b2747 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 26 Apr 2018 21:56:44 -0400 Subject: net: dsa: mv88e6xxx: move trunk setup Move the trunking setup out of Global 2 specific setup into the top level mv88e6xxx_setup function. Note that the 88E6390 family calls this LAG instead of Trunk and supports 32 possible ID routing vectors, with LAG ID bit 4 being placed in Global 2 register 0x1D... We don't need Trunk (or LAG) IDs for the moment, thus keep it simple. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 13 +++++++++++++ drivers/net/dsa/mv88e6xxx/global2.c | 7 +------ drivers/net/dsa/mv88e6xxx/global2.h | 7 +++++++ 3 files changed, 21 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index a13a7005ffa0..02ff41338491 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1026,6 +1026,15 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to update state\n", port); } +static int mv88e6xxx_trunk_setup(struct mv88e6xxx_chip *chip) +{ + /* Clear all trunk masks and mapping */ + if (chip->info->global2_addr) + return mv88e6xxx_g2_trunk_clear(chip); + + return 0; +} + static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip) { if (chip->info->ops->pot_clear) @@ -2239,6 +2248,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; + err = mv88e6xxx_trunk_setup(chip); + if (err) + goto unlock; + /* Setup PTP Hardware Clock and timestamping */ if (chip->info->ptp_support) { err = mv88e6xxx_ptp_setup(chip); diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 0ce627fded48..3f24763aedc5 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -174,7 +174,7 @@ static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id, return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val); } -static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip) +int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip) { const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1; int i, err; @@ -1159,10 +1159,5 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) if (err) return err; - /* Clear all trunk masks and mapping. 
*/ - err = mv88e6xxx_g2_clear_trunk(chip); - if (err) - return err; - return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 520ec70d32e8..7bd4ab31a93e 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -327,6 +327,8 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip); + extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; @@ -495,6 +497,11 @@ static inline int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip, return -EOPNOTSUPP; } +static inline int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip) +{ + return -EOPNOTSUPP; +} + #endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ #endif /* _MV88E6XXX_GLOBAL2_H */ -- cgit v1.2.3 From c7f047b6c7bc3f6740a8fb1f2d03ecb19022fdc0 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 26 Apr 2018 21:56:45 -0400 Subject: net: dsa: mv88e6xxx: move device mapping setup Move the Device Mapping setup out of the specific Global 2 code, into the top level device setup function. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 27 +++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/global2.c | 37 ++++++------------------------------- drivers/net/dsa/mv88e6xxx/global2.h | 12 +++++++++++- 3 files changed, 44 insertions(+), 32 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 02ff41338491..d1ca4d6ef9be 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1026,6 +1026,29 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to update state\n", port); } +static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip) +{ + int target, port; + int err; + + if (!chip->info->global2_addr) + return 0; + + /* Initialize the routing port to the 32 possible target devices */ + for (target = 0; target < 32; target++) { + port = 0x1f; + if (target < DSA_MAX_SWITCHES) + if (chip->ds->rtable[target] != DSA_RTABLE_NONE) + port = chip->ds->rtable[target]; + + err = mv88e6xxx_g2_device_mapping_write(chip, target, port); + if (err) + return err; + } + + return 0; +} + static int mv88e6xxx_trunk_setup(struct mv88e6xxx_chip *chip) { /* Clear all trunk masks and mapping */ @@ -2252,6 +2275,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; + err = mv88e6xxx_devmap_setup(chip); + if (err) + goto unlock; + /* Setup PTP Hardware Clock and timestamping */ if (chip->info->ptp_support) { err = mv88e6xxx_ptp_setup(chip); diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 3f24763aedc5..96e74d8d500d 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -119,37 +119,17 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) /* Offset 0x06: Device Mapping Table register */ -static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, - int target, int port) +int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, + int port) { - u16 val = (target << 8) | (port & 0xf); + u16 val = (target << 8) | (port & 0x1f); + /* Modern chips use 5 bits to define a device mapping port, + * but bit 4 is reserved on older chips, so it is safe to 
use. + */ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val); } -static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip) -{ - int target, port; - int err; - - /* Initialize the routing port to the 32 possible target devices */ - for (target = 0; target < 32; ++target) { - port = 0xf; - - if (target < DSA_MAX_SWITCHES) { - port = chip->ds->rtable[target]; - if (port == DSA_RTABLE_NONE) - port = 0xf; - } - - err = mv88e6xxx_g2_device_mapping_write(chip, target, port); - if (err) - break; - } - - return err; -} - /* Offset 0x07: Trunk Mask Table register */ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num, @@ -1154,10 +1134,5 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) if (err) return err; - /* Program the DSA routing table. */ - err = mv88e6xxx_g2_set_device_mapping(chip); - if (err) - return err; - return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 7bd4ab31a93e..46913a289255 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -60,7 +60,8 @@ #define MV88E6XXX_G2_DEVICE_MAPPING 0x06 #define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000 #define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00 -#define MV88E6XXX_G2_DEVICE_MAPPING_PORT_MASK 0x000f +#define MV88E6352_G2_DEVICE_MAPPING_PORT_MASK 0x000f +#define MV88E6390_G2_DEVICE_MAPPING_PORT_MASK 0x001f /* Offset 0x07: Trunk Mask Table Register */ #define MV88E6XXX_G2_TRUNK_MASK 0x07 @@ -329,6 +330,9 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, + int port); + extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; @@ -502,6 +506,12 @@ static inline int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip) return -EOPNOTSUPP; } +static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, + int target, int port) +{ + return -EOPNOTSUPP; +} + #endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ #endif /* _MV88E6XXX_GLOBAL2_H */ -- cgit v1.2.3 From 5d49d60307aaa69541beafdd17bd3d0ea238732b Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 26 Apr 2018 21:56:46 -0400 Subject: net: dsa: mv88e6xxx: remove Global 2 setup The remaining values written to the Switch Management Register in the mv88e6xxx_g2_setup function are specific to 88E6352 and older, and are the default values anyway. Thus remove completely this function. The mv88e6xxx driver no more contains setup code to access arbitrary Global 2 registers. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 7 ------- drivers/net/dsa/mv88e6xxx/global2.c | 18 ------------------ drivers/net/dsa/mv88e6xxx/global2.h | 6 ------ 3 files changed, 31 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d1ca4d6ef9be..9d62e4acc01b 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2228,13 +2228,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; - /* Setup Switch Global 2 Registers */ - if (chip->info->global2_addr) { - err = mv88e6xxx_g2_setup(chip); - if (err) - goto unlock; - } - err = mv88e6xxx_irl_setup(chip); if (err) goto unlock; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 96e74d8d500d..e6d658181b27 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -1118,21 +1118,3 @@ void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip, for (phy = 0; phy < chip->info->num_internal_phys; phy++) irq_dispose_mapping(bus->irq[phy]); } - -int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) -{ - u16 reg; - int err; - - /* Ignore removed tag data on doubly tagged packets, disable - * flow control messages, force flow control priority to the - * highest, and send all special multicast frames to the CPU - * port at the highest priority. - */ - reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4); - err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg); - if (err) - return err; - - return 0; -} diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 46913a289255..37e8ce2c72a0 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -314,7 +314,6 @@ int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, int src_port, u16 data); int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip); -int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); @@ -447,11 +446,6 @@ static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) return -EOPNOTSUPP; } -static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) -{ - return -EOPNOTSUPP; -} - static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) { return -EOPNOTSUPP; -- cgit v1.2.3 From 89b36fb5e53244701f7ddec0ece0e9a48fdb3b11 Mon Sep 17 00:00:00 2001 From: Raghuram Chary J Date: Sat, 28 Apr 2018 11:33:14 +0530 Subject: lan78xx: Lan7801 Support for Fixed PHY Adding Fixed PHY support to the lan78xx driver. Signed-off-by: Raghuram Chary J Signed-off-by: David S. Miller --- drivers/net/usb/Kconfig | 1 + drivers/net/usb/lan78xx.c | 104 +++++++++++++++++++++++++++++++++------------- 2 files changed, 77 insertions(+), 28 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index f28bd74ac275..418b0904cecb 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -111,6 +111,7 @@ config USB_LAN78XX select MII select PHYLIB select MICROCHIP_PHY + select FIXED_PHY help This option adds support for Microchip LAN78XX based USB 2 & USB 3 10/100/1000 Ethernet adapters. 
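The driver-side half of this change, in the lan78xx.c diff that follows, falls back to a registered fixed PHY when phy_find_first() finds nothing on the MDIO bus. A minimal sketch of that phylib pattern is shown below; the helper name and the simplified error handling are illustrative only, while fixed_phy_register() is used in the same four-argument form as in the diff:

#include <linux/err.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

/* Sketch only: prefer a real PHY found on the MDIO bus, otherwise
 * register an always-up 1 Gbit/s full-duplex fixed PHY (PHY_POLL,
 * no link GPIO, no DT node), as the lan7801 path below does.
 */
static struct phy_device *example_find_or_register_phy(struct mii_bus *bus)
{
	struct fixed_phy_status status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(bus);
	if (phydev)
		return phydev;		/* real PHY responded on the bus */

	phydev = fixed_phy_register(PHY_POLL, &status, -1, NULL);
	if (IS_ERR(phydev))
		return NULL;		/* no PHY and no fixed-PHY fallback */

	return phydev;
}

In the actual patch the fixed-PHY path additionally programs the MAC for RGMII with the internal TX clock delay and enables the 125 MHz/25 MHz reference clocks, which are the MAC_RGMII_ID, RGMII_TX_BYP_DLL and HW_CFG register writes visible in the diff.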
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index c59f8afd0d73..81dfd10c3b92 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include #include "lan78xx.h" @@ -2063,52 +2063,91 @@ static int ksz9031rnx_fixup(struct phy_device *phydev) return 1; } -static int lan78xx_phy_init(struct lan78xx_net *dev) +static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) { + u32 buf; int ret; - u32 mii_adv; + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + }; struct phy_device *phydev; phydev = phy_find_first(dev->mdiobus); if (!phydev) { - netdev_err(dev->net, "no PHY found\n"); - return -EIO; - } - - if ((dev->chipid == ID_REV_CHIP_ID_7800_) || - (dev->chipid == ID_REV_CHIP_ID_7850_)) { - phydev->is_internal = true; - dev->interface = PHY_INTERFACE_MODE_GMII; - - } else if (dev->chipid == ID_REV_CHIP_ID_7801_) { + netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n"); + phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, + NULL); + if (IS_ERR(phydev)) { + netdev_err(dev->net, "No PHY/fixed_PHY found\n"); + return NULL; + } + netdev_dbg(dev->net, "Registered FIXED PHY\n"); + dev->interface = PHY_INTERFACE_MODE_RGMII; + ret = lan78xx_write_reg(dev, MAC_RGMII_ID, + MAC_RGMII_ID_TXC_DELAY_EN_); + ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); + ret = lan78xx_read_reg(dev, HW_CFG, &buf); + buf |= HW_CFG_CLK125_EN_; + buf |= HW_CFG_REFCLK25_EN_; + ret = lan78xx_write_reg(dev, HW_CFG, buf); + } else { if (!phydev->drv) { netdev_err(dev->net, "no PHY driver found\n"); - return -EIO; + return NULL; } - dev->interface = PHY_INTERFACE_MODE_RGMII; - /* external PHY fixup for KSZ9031RNX */ ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0, ksz9031rnx_fixup); if (ret < 0) { netdev_err(dev->net, "fail to register fixup\n"); - return ret; + return NULL; } /* external PHY fixup for LAN8835 */ ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0, lan8835_fixup); if (ret < 0) { netdev_err(dev->net, "fail to register fixup\n"); - return ret; + return NULL; } /* add more external PHY fixup here if needed */ phydev->is_internal = false; - } else { - netdev_err(dev->net, "unknown ID found\n"); - ret = -EIO; - goto error; + } + return phydev; +} + +static int lan78xx_phy_init(struct lan78xx_net *dev) +{ + int ret; + u32 mii_adv; + struct phy_device *phydev; + + switch (dev->chipid) { + case ID_REV_CHIP_ID_7801_: + phydev = lan7801_phy_init(dev); + if (!phydev) { + netdev_err(dev->net, "lan7801: PHY Init Failed"); + return -EIO; + } + break; + + case ID_REV_CHIP_ID_7800_: + case ID_REV_CHIP_ID_7850_: + phydev = phy_find_first(dev->mdiobus); + if (!phydev) { + netdev_err(dev->net, "no PHY found\n"); + return -EIO; + } + phydev->is_internal = true; + dev->interface = PHY_INTERFACE_MODE_GMII; + break; + + default: + netdev_err(dev->net, "Unknown CHIP ID found\n"); + return -EIO; } /* if phyirq is not set, use polling mode in phylib */ @@ -2127,6 +2166,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) if (ret) { netdev_err(dev->net, "can't attach PHY to %s\n", dev->mdiobus->id); + if (dev->chipid == ID_REV_CHIP_ID_7801_) { + if (phy_is_pseudo_fixed_link(phydev)) { + fixed_phy_unregister(phydev); + } else { + phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, + 0xfffffff0); + phy_unregister_fixup_for_uid(PHY_LAN8835, + 0xfffffff0); + } + } return -EIO; } @@ -2166,12 +2215,6 @@ static int lan78xx_phy_init(struct 
lan78xx_net *dev) dev->fc_autoneg = phydev->autoneg; return 0; - -error: - phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); - phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); - - return ret; } static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) @@ -3569,6 +3612,7 @@ static void lan78xx_disconnect(struct usb_interface *intf) struct lan78xx_net *dev; struct usb_device *udev; struct net_device *net; + struct phy_device *phydev; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); @@ -3577,12 +3621,16 @@ static void lan78xx_disconnect(struct usb_interface *intf) udev = interface_to_usbdev(intf); net = dev->net; + phydev = net->phydev; phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); phy_disconnect(net->phydev); + if (phy_is_pseudo_fixed_link(phydev)) + fixed_phy_unregister(phydev); + unregister_netdev(net); cancel_delayed_work_sync(&dev->wq); -- cgit v1.2.3 From e92258c761130c57567b7f051ea664957ce245ba Mon Sep 17 00:00:00 2001 From: Raghuram Chary J Date: Sat, 28 Apr 2018 11:33:15 +0530 Subject: lan78xx: Remove DRIVER_VERSION for lan78xx driver Remove driver version info from the lan78xx driver. Signed-off-by: Raghuram Chary J Signed-off-by: David S. Miller --- drivers/net/usb/lan78xx.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 81dfd10c3b92..54f8db887e3d 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -44,7 +44,6 @@ #define DRIVER_AUTHOR "WOOJUNG HUH " #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" #define DRIVER_NAME "lan78xx" -#define DRIVER_VERSION "1.0.6" #define TX_TIMEOUT_JIFFIES (5 * HZ) #define THROTTLE_JIFFIES (HZ / 8) @@ -1503,7 +1502,6 @@ static void lan78xx_get_drvinfo(struct net_device *net, struct lan78xx_net *dev = netdev_priv(net); strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); - strncpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } -- cgit v1.2.3 From 7670ed7a25943a77af64ec4a7247a504e98fe812 Mon Sep 17 00:00:00 2001 From: Raghuram Chary J Date: Sat, 28 Apr 2018 11:33:16 +0530 Subject: lan78xx: Modify error messages Modify the error messages when phy registration fails. Signed-off-by: Raghuram Chary J Signed-off-by: David S. 
Miller --- drivers/net/usb/lan78xx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 54f8db887e3d..91761436709a 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2100,14 +2100,14 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0, ksz9031rnx_fixup); if (ret < 0) { - netdev_err(dev->net, "fail to register fixup\n"); + netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n"); return NULL; } /* external PHY fixup for LAN8835 */ ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0, lan8835_fixup); if (ret < 0) { - netdev_err(dev->net, "fail to register fixup\n"); + netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n"); return NULL; } /* add more external PHY fixup here if needed */ -- cgit v1.2.3 From b86629ebd5447c1e263d89be765ba6fd747c5e4d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 27 Apr 2018 14:06:45 -0400 Subject: mlx4: Don't bother using skb_tx_hash in mlx4_en_select_queue The code in the fallback path has supported XDP in conjunction with the Tx traffic classification for TCs for over a year now. So instead of just calling skb_tx_hash for every packet we are better off using the fallback since that will record the Tx queue to the socket and then that can be used instead of having to recompute the hash every time. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 6b6853773848..0227786308af 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -694,7 +694,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, u16 rings_p_up = priv->num_tx_rings_p_up; if (netdev_get_num_tc(dev)) - return skb_tx_hash(dev, skb); + return fallback(dev, skb); return fallback(dev, skb) % rings_p_up; } -- cgit v1.2.3 From 97389373d555cdc03ed346d7dc6f714a1586c705 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 18 Apr 2018 00:27:18 +0200 Subject: mt76x2: fix is_mt7612 routine Fix is_mt7612 routine since asic version is set in mt76_dev revision field and not in mt76x2_dev one. 
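For reference, the helper used in the corrected check is a small accessor on the shared mt76_dev state; the definitions below are quoted from mt76.h as an assumption (they are not part of this patch) and show why dev->mt76.rev is the right source for the chip ID:

/* Assumed mt76.h helpers: the ASIC version word keeps the chip ID in
 * the high 16 bits and the hardware revision in the low 16 bits.
 */
#define mt76_chip(dev)	((dev)->rev >> 16)
#define mt76_rev(dev)	((dev)->rev & 0xffff)

/* So the corrected check in the diff below is equivalent to
 *	(dev->mt76.rev >> 16) == 0x7612
 */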
Moreover remove mt76x2_dev rev field since it is never used in the driver Fixes: 7bc04215a66b ('mt76: add driver code for MT76x2e') Signed-off-by: Lorenzo Bianconi Acked-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 783b8122ec3c..a5d1255e4b9c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -117,7 +117,6 @@ struct mt76x2_dev { u8 beacon_mask; u8 beacon_data_mask; - u32 rev; u32 rxfilter; u16 chainmask; @@ -151,7 +150,7 @@ struct mt76x2_sta { static inline bool is_mt7612(struct mt76x2_dev *dev) { - return (dev->rev >> 16) == 0x7612; + return mt76_chip(&dev->mt76) == 0x7612; } void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set); -- cgit v1.2.3 From c3d7c82a8bb017e43cafe8eaf7c8309f85ceb781 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 25 Apr 2018 11:11:21 +0200 Subject: mt76: fix concurrent rx calls on A-MPDU release Add a spinlock in mt76_rx_complete. Without this, multiple stats updates could happen in parallel, which can lead to deadlocks. There are probably more corner cases fixed by this change. Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mac80211.c | 2 ++ drivers/net/wireless/mediatek/mt76/mt76.h | 1 + drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 1 + 3 files changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index eb49e0a6758c..915e61733131 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -561,6 +561,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, if (queue >= 0) napi = &dev->napi[queue]; + spin_lock(&dev->rx_lock); while ((skb = __skb_dequeue(frames)) != NULL) { if (mt76_check_ccmp_pn(skb)) { dev_kfree_skb(skb); @@ -570,6 +571,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, sta = mt76_rx_convert(skb); ieee80211_rx_napi(dev->hw, sta, skb, napi); } + spin_unlock(&dev->rx_lock); } void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 065ff78059c3..a74e6eef51e9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -241,6 +241,7 @@ struct mt76_dev { struct device *dev; struct net_device napi_dev; + spinlock_t rx_lock; struct napi_struct napi[__MT_RXQ_MAX]; struct sk_buff_head rx_skb[__MT_RXQ_MAX]; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 359b105235b3..d21e4a7c1bb9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -645,6 +645,7 @@ struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev) dev->mt76.drv = &drv_ops; mutex_init(&dev->mutex); spin_lock_init(&dev->irq_lock); + spin_lock_init(&dev->mt76.rx_lock); return dev; } -- cgit v1.2.3 From 9febfa67ca1566294ea5fd1f95ebf02181e7a233 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 25 Apr 2018 11:11:22 +0200 Subject: mt76: add rcu locking in tid reorder function Avoids having the tid or station entry disappear 
prematurely. Also cancel the reorder work earlier to avoid further processing delayed by waiting for the lock to be released Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/agg-rx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index dbf4057d2d3e..b67acc6189bf 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -103,6 +103,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work) __skb_queue_head_init(&frames); local_bh_disable(); + rcu_read_lock(); spin_lock(&tid->lock); mt76_rx_aggr_check_release(tid, &frames); @@ -114,6 +115,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work) REORDER_TIMEOUT); mt76_rx_complete(dev, &frames, -1); + rcu_read_unlock(); local_bh_enable(); } @@ -266,6 +268,8 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) u8 size = tid->size; int i; + cancel_delayed_work(&tid->reorder_work); + spin_lock_bh(&tid->lock); tid->stopped = true; @@ -280,8 +284,6 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) } spin_unlock_bh(&tid->lock); - - cancel_delayed_work(&tid->reorder_work); } void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno) -- cgit v1.2.3 From 1d868b70e06a2319fdda46cc46ec7c6762557543 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 25 Apr 2018 11:11:23 +0200 Subject: mt76: add rcu locking around tx scheduling Fixes a reported lockdep error in mac80211: [ 179.867321] ============================= [ 179.871510] WARNING: suspicious RCU usage [ 179.875528] 4.14.32 #0 Not tainted [ 179.878924] ----------------------------- [ 179.882981] backports-2017-11-01/net/mac80211/tx.c:594 suspicious rcu_dereference_check() usage! [ 179.891785] [ 179.891785] other info that might help us debug this: [ 179.891785] [ 179.899824] [ 179.899824] rcu_scheduler_active = 2, debug_locks = 1 [ 179.906343] 2 locks held by ksoftirqd/0/7: [ 179.910479] #0: (&(&q->lock)->rlock){+.-.}, at: [<86b207a4>] mt76_dma_tx_cleanup+0x64/0x354 [mt76] [ 179.919734] #1: (&(&fq->lock)->rlock){+.-.}, at: [<87238410>] ieee80211_tx_dequeue+0x54/0xc3c [mac80211] [ 179.929890] [ 179.929890] stack backtrace: [ 179.934257] CPU: 0 PID: 7 Comm: ksoftirqd/0 Not tainted 4.14.32 #0 [ 179.940421] Stack : 00000000 00000000 00000000 00000000 80e0fce2 00000036 00000000 00000000 [ 179.948864] 87c3d24c 80696377 8061039c 00000000 00000007 00000001 87c5db78 6534689d [ 179.957306] 00000000 00000000 80e10000 87c5da74 00000001 0000015a 00000007 00000000 [ 179.965748] 00000000 806a0000 000e4171 00000000 00000000 00000000 ffffffff 00000001 [ 179.974189] 806c0000 8692b240 86b000d0 87316fe4 00000001 802c9a68 00000000 80700000 [ 179.982632] ... 
[ 179.985104] Call Trace: [ 179.987582] [<80010a48>] show_stack+0x58/0x100 [ 179.992040] [<804c2c58>] dump_stack+0xe8/0x170 [ 179.996868] [<87234a04>] ieee80211_tx_h_select_key+0xa8/0x5b8 [mac80211] [ 180.004299] [<87238d44>] ieee80211_tx_dequeue+0x988/0xc3c [mac80211] [ 180.011048] [<86b230dc>] mt76_txq_schedule+0x110/0x3a4 [mt76] [ 180.016821] [<86b209d0>] mt76_dma_tx_cleanup+0x290/0x354 [mt76] [ 180.022777] [<86be2e60>] mt7603_tx_tasklet+0x40/0x6c [mt7603e] [ 180.028637] [<80037058>] tasklet_action+0x110/0x1ec [ 180.033532] [<804e1dac>] __do_softirq+0x164/0x35c [ 180.038235] [<80037174>] run_ksoftirqd+0x40/0x84 [ 180.042870] [<800580c8>] smpboot_thread_fn+0x1a8/0x1d8 [ 180.048023] [<800542e8>] kthread+0x130/0x144 [ 180.052297] [<8000b1f8>] ret_from_kernel_thread+0x14/0x1c Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/tx.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 4eef69bd8a9e..6aca794cf998 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -422,12 +422,14 @@ void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq) { int len; + rcu_read_lock(); do { if (hwq->swq_queued >= 4 || list_empty(&hwq->swq)) break; len = mt76_txq_schedule_list(dev, hwq); } while (len > 0); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(mt76_txq_schedule); -- cgit v1.2.3 From 95135e8c0b4add86d3ce1ab5a24881b673c2db04 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 25 Apr 2018 11:11:24 +0200 Subject: mt76: check for pending reset before attempting to schedule tx The check within mt76_txq_send_burst is not enough, as it happens after a first frame has already been queued up Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/tx.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 6aca794cf998..7ecd2d7c5db4 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -385,6 +385,10 @@ restart: bool empty = false; int cur; + if (test_bit(MT76_SCANNING, &dev->state) || + test_bit(MT76_RESET, &dev->state)) + return -EBUSY; + mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list); if (mtxq->send_bar && mtxq->aggr) { struct ieee80211_txq *txq = mtxq_to_txq(mtxq); -- cgit v1.2.3 From c126e1995f79fb487aa99bbead0f23716b43db01 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 24 Apr 2018 15:18:02 +0200 Subject: mwifiex: fix mwifiex_hard_start_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index b6484582845a..802fb3ecce97 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -858,7 +858,7 @@ mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv, /* * CFG802.11 network device handler for data transmission. 
*/ -static int +static netdev_tx_t mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); -- cgit v1.2.3 From 307857db47ebdcd56ecf26ff084cf9b966a67e17 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 25 Apr 2018 17:38:12 +0800 Subject: mwifiex: make firmware mac address consistent with host configuration For user configurated mac address, directly set to firmware with no change. Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 4 +-- drivers/net/wireless/marvell/mwifiex/main.c | 38 ++++++++++++++----------- drivers/net/wireless/marvell/mwifiex/main.h | 3 +- 3 files changed, 26 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 4857b75e54a7..54a2297010d2 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -929,7 +929,7 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, adapter->rx_locked = false; spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); - mwifiex_set_mac_address(priv, dev); + mwifiex_set_mac_address(priv, dev, false, NULL); return 0; } @@ -2979,7 +2979,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->netdev = dev; if (!adapter->mfg_mode) { - mwifiex_set_mac_address(priv, dev); + mwifiex_set_mac_address(priv, dev, false, NULL); ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE, HostCmd_ACT_GEN_SET, 0, NULL, true); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 802fb3ecce97..99eaff97315c 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -940,28 +940,35 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } int mwifiex_set_mac_address(struct mwifiex_private *priv, - struct net_device *dev) + struct net_device *dev, bool external, + u8 *new_mac) { int ret; u64 mac_addr, old_mac_addr; - if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY) - return -ENOTSUPP; + old_mac_addr = ether_addr_to_u64(priv->curr_addr); - mac_addr = ether_addr_to_u64(priv->curr_addr); - old_mac_addr = mac_addr; + if (external) { + mac_addr = ether_addr_to_u64(new_mac); + } else { + /* Internal mac address change */ + if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY) + return -ENOTSUPP; - if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P) - mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); + mac_addr = old_mac_addr; - if (mwifiex_get_intf_num(priv->adapter, priv->bss_type) > 1) { - /* Set mac address based on bss_type/bss_num */ - mac_addr ^= BIT_ULL(priv->bss_type + 8); - mac_addr += priv->bss_num; - } + if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P) + mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); - if (mac_addr == old_mac_addr) - goto done; + if (mwifiex_get_intf_num(priv->adapter, priv->bss_type) > 1) { + /* Set mac address based on bss_type/bss_num */ + mac_addr ^= BIT_ULL(priv->bss_type + 8); + mac_addr += priv->bss_num; + } + + if (mac_addr == old_mac_addr) + goto done; + } u64_to_ether_addr(mac_addr, priv->curr_addr); @@ -989,8 +996,7 @@ mwifiex_ndo_set_mac_address(struct net_device *dev, void *addr) struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct sockaddr *hw_addr = addr; - memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN); - return mwifiex_set_mac_address(priv, dev); + return 
mwifiex_set_mac_address(priv, dev, true, hw_addr->sa_data); } /* diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 9bde181700dc..7c95c1279548 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1709,7 +1709,8 @@ void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, struct sk_buff *event_skb); void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter); int mwifiex_set_mac_address(struct mwifiex_private *priv, - struct net_device *dev); + struct net_device *dev, + bool external, u8 *new_mac); void mwifiex_devdump_tmo_func(unsigned long function_context); #ifdef CONFIG_DEBUG_FS -- cgit v1.2.3 From c3b2a34b82db21bf46e00fbfa1b3c3ccaa0ec18f Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 25 Apr 2018 17:38:13 +0800 Subject: mwifiex: always configure firmware mac address during changing virtual interface When interface type changed, firmware using a new connction pointer. We need Re-configure the mac address. Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 99eaff97315c..40c80e38a74b 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -965,9 +965,6 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv, mac_addr ^= BIT_ULL(priv->bss_type + 8); mac_addr += priv->bss_num; } - - if (mac_addr == old_mac_addr) - goto done; } u64_to_ether_addr(mac_addr, priv->curr_addr); @@ -983,7 +980,6 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv, return ret; } -done: ether_addr_copy(dev->dev_addr, priv->curr_addr); return 0; } -- cgit v1.2.3 From 7cce13954f0e6b16b4c18a3985387018f2e3e44e Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 25 Apr 2018 17:38:14 +0800 Subject: mwifiex: keep user configured mac address during changing virtual interface During changing virtual interface, keep using previous net device mac address. Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 40c80e38a74b..510f6b8e717d 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1333,7 +1333,10 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK; priv->gen_idx = MWIFIEX_AUTO_IDX_MASK; priv->num_tx_timeout = 0; - ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr); + if (is_valid_ether_addr(dev->dev_addr)) + ether_addr_copy(priv->curr_addr, dev->dev_addr); + else + ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA || GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { -- cgit v1.2.3 From 6e1d8d1470b2c9335715f7d52e864f0bd91a5f59 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Tue, 10 Apr 2018 21:54:19 +0800 Subject: net: wireless: b43legacy: Replace GFP_ATOMIC with GFP_KERNEL in dma_tx_fragment dma_tx_fragment() is never called in atomic context. dma_tx_fragment() is only called by b43legacy_dma_tx(), which is only called by b43legacy_tx_work(). 
b43legacy_tx_work() is only set a parameter of INIT_WORK() in b43legacy_wireless_init(). Despite never getting called from atomic context, dma_tx_fragment() calls alloc_skb() with GFP_ATOMIC, which does not sleep for allocation. GFP_ATOMIC is not necessary and can be replaced with GFP_KERNEL, which can sleep and improve the possibility of sucessful allocation. This is found by a static analysis tool named DCNS written by myself. And I also manually check it. Signed-off-by: Jia-Ju Bai Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/b43legacy/dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c index cfa617ddb2f1..2f0c64cef65f 100644 --- a/drivers/net/wireless/broadcom/b43legacy/dma.c +++ b/drivers/net/wireless/broadcom/b43legacy/dma.c @@ -1064,7 +1064,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. */ if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { - bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA); if (!bounce_skb) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; -- cgit v1.2.3 From 3ff6ee28535f2f6698cc2b3acfd2af6942036067 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 24 Apr 2018 15:18:04 +0200 Subject: qtnfmac: fix qtnf_netdev_hard_start_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Reviewed-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index cf26c15a84f8..b3bfb4faa918 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -76,7 +76,7 @@ static int qtnf_netdev_close(struct net_device *ndev) /* Netdev handler for data transmission. */ -static int +static netdev_tx_t qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct qtnf_vif *vif; -- cgit v1.2.3 From 60f36637bbbdeea199d644eda284dba5144d2377 Mon Sep 17 00:00:00 2001 From: Eyal Reizer Date: Thu, 26 Apr 2018 08:47:11 +0300 Subject: wlcore: sdio: allow pm to handle sdio power pm_runtime handles sdio power on and power off transitions. An old workaround for trying to control the power explicitly from the driver was in fact causing failures on suspend/resume as the mmc layer already power the module on resume. In case of resume pm_runtime_get sync returns a positive device's usage count causing the driver to try an re-initialize an already initialized device. This was causing sdio bus failure on resume. Remove this manual power on/off sequence as it is in-fact not needed. 
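For illustration, a minimal sketch of the runtime-PM pattern the driver now relies on; the function names and the simplified error handling below are placeholders, not the actual wlcore code:

#include <linux/pm_runtime.h>

static int example_sdio_power_on(struct device *card_dev)
{
	/* The MMC core powers the card as part of runtime resume. */
	int ret = pm_runtime_get_sync(card_dev);

	if (ret < 0) {
		/* get_sync failed but still took a reference; drop it. */
		pm_runtime_put_noidle(card_dev);
		return ret;
	}

	return 0;
}

static int example_sdio_power_off(struct device *card_dev)
{
	/* Drop our reference and let runtime PM power the card down. */
	return pm_runtime_put_sync(card_dev);
}
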
Signed-off-by: Eyal Reizer Acked-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/sdio.c | 27 ++++++--------------------- 1 file changed, 6 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 1f727babbea0..6dbe61d47dc3 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -155,17 +155,11 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) struct mmc_card *card = func->card; ret = pm_runtime_get_sync(&card->dev); - if (ret) { - /* - * Runtime PM might be temporarily disabled, or the device - * might have a positive reference counter. Make sure it is - * really powered on. - */ - ret = mmc_power_restore_host(card->host); - if (ret < 0) { - pm_runtime_put_sync(&card->dev); - goto out; - } + if (ret < 0) { + pm_runtime_put_noidle(&card->dev); + dev_err(glue->dev, "%s: failed to get_sync(%d)\n", + __func__, ret); + goto out; } sdio_claim_host(func); @@ -178,7 +172,6 @@ out: static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) { - int ret; struct sdio_func *func = dev_to_sdio_func(glue->dev); struct mmc_card *card = func->card; @@ -186,16 +179,8 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) sdio_disable_func(func); sdio_release_host(func); - /* Power off the card manually in case it wasn't powered off above */ - ret = mmc_power_save_host(card->host); - if (ret < 0) - goto out; - /* Let runtime PM know the card is powered off */ - pm_runtime_put_sync(&card->dev); - -out: - return ret; + return pm_runtime_put_sync(&card->dev); } static int wl12xx_sdio_set_power(struct device *child, bool enable) -- cgit v1.2.3 From e1fd7ceec194225a822760a40dfa73efdd59881b Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 26 Apr 2018 08:01:38 -0500 Subject: rsi_91x: fix structurally dead code Function rsi_hal_key_config returns before reaching code at line 922 if (status), hence this code is structurally dead. Fix this by storing the value returned by rsi_hal_load_key into _status_ for its further evaluation and use. Addresses-Coverity-ID: 1468409 ("Structurally dead code") Fixes: 4fd6c4762f37 ("rsi: roaming enhancements") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 766d874cc6e2..80e7f4f4f188 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -911,14 +911,14 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw, } } - return rsi_hal_load_key(adapter->priv, - key->key, - key->keylen, - key_type, - key->keyidx, - key->cipher, - sta_id, - vif); + status = rsi_hal_load_key(adapter->priv, + key->key, + key->keylen, + key_type, + key->keyidx, + key->cipher, + sta_id, + vif); if (status) return status; -- cgit v1.2.3 From 48c6b5c9c1180b7f8b35edeaef9b3aa3e3b6c9d5 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 26 Apr 2018 08:13:24 -0500 Subject: rsi_91x: fix uninitialized variable There is a potential execution path in which variable ret is returned without being properly initialized previously. Fix this by storing the value returned by function rsi_usb_master_reg_write into _ret_. 
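As a side note, a minimal sketch of the general pattern being fixed: before the change the helper's return value was tested directly, so ret could reach the failure path without ever being assigned. reg_write() below is a stand-in, not the driver's function:

/* Stand-in for the real register-write helper, for illustration only. */
static int reg_write(void)
{
	return 0;
}

static int example_reset(void)
{
	int ret;

	/* Capture the status so ret is initialized on every path. */
	ret = reg_write();
	if (ret < 0)
		goto fail;

	return 0;

fail:
	return ret;
}
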
Addresses-Coverity-ID: 1468407 ("Uninitialized scalar variable") Fixes: 16d3bb7b2f37 ("rsi: disable fw watchdog timer during reset") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index b065438f51b2..6ce6b754df12 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -687,9 +687,10 @@ static int rsi_reset_card(struct rsi_hw *adapter) */ msleep(100); - if (rsi_usb_master_reg_write(adapter, SWBL_REGOUT, - RSI_FW_WDT_DISABLE_REQ, - RSI_COMMON_REG_SIZE) < 0) { + ret = rsi_usb_master_reg_write(adapter, SWBL_REGOUT, + RSI_FW_WDT_DISABLE_REQ, + RSI_COMMON_REG_SIZE); + if (ret < 0) { rsi_dbg(ERR_ZONE, "Disabling firmware watchdog timer failed\n"); goto fail; } -- cgit v1.2.3 From cc282a0c5f0823bfcf084103445c69894129227c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 18 Apr 2018 12:47:50 +0100 Subject: rt2x00: fix spelling mistake in various macros, UKNOWN -> UNKNOWN Rename several macros that contain mispellings of UNKNOWN Signed-off-by: Colin Ian King Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index 7a133e94b1bf..b05ed2f3025a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -1194,10 +1194,10 @@ #define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0) #define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00) #define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000) -#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000) -#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000) -#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000) -#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000) +#define TX_PWR_CFG_3_UNKNOWN1 FIELD32(0x000f0000) +#define TX_PWR_CFG_3_UNKNOWN2 FIELD32(0x00f00000) +#define TX_PWR_CFG_3_UNKNOWN3 FIELD32(0x0f000000) +#define TX_PWR_CFG_3_UNKNOWN4 FIELD32(0xf0000000) /* bits for 3T devices */ #define TX_PWR_CFG_3_MCS12_CH0 FIELD32(0x0000000f) #define TX_PWR_CFG_3_MCS12_CH1 FIELD32(0x000000f0) @@ -1217,10 +1217,10 @@ * TX_PWR_CFG_4: */ #define TX_PWR_CFG_4 0x1324 -#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f) -#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0) -#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00) -#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000) +#define TX_PWR_CFG_4_UNKNOWN5 FIELD32(0x0000000f) +#define TX_PWR_CFG_4_UNKNOWN6 FIELD32(0x000000f0) +#define TX_PWR_CFG_4_UNKNOWN7 FIELD32(0x00000f00) +#define TX_PWR_CFG_4_UNKNOWN8 FIELD32(0x0000f000) /* bits for 3T devices */ #define TX_PWR_CFG_4_STBC4_CH0 FIELD32(0x0000000f) #define TX_PWR_CFG_4_STBC4_CH1 FIELD32(0x000000f0) -- cgit v1.2.3 From 3ea0a58cf9cf66e53f52184446fd881f828709e4 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 28 Apr 2018 23:31:08 +0100 Subject: ipw2100: fix spelling mistake: "decsribed" -> "described" Trivial fix to spelling mistake in comment and in the ord_data text Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2 +- drivers/net/wireless/intel/ipw2x00/ipw2100.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git 
a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 236b52423506..7c4f550a1475 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -3732,7 +3732,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"), IPW2100_ORD(ASSOCIATED_AP_PTR, "0 if not associated, else pointer to AP table entry"), IPW2100_ORD(AVAILABLE_AP_CNT, - "AP's decsribed in the AP table"), + "AP's described in the AP table"), IPW2100_ORD(AP_LIST_PTR, "Ptr to list of available APs"), IPW2100_ORD(STAT_AP_ASSNS, "associations"), IPW2100_ORD(STAT_ASSN_FAIL, "association failures"), diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h index 193947865efd..ce3e35f6b60f 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h @@ -1009,7 +1009,7 @@ typedef enum _ORDINAL_TABLE_1 { // NS - means Not Supported by FW IPW_ORD_STAT_PERCENT_RETRIES, // current calculation of % missed tx retries IPW_ORD_ASSOCIATED_AP_PTR, // If associated, this is ptr to the associated // AP table entry. set to 0 if not associated - IPW_ORD_AVAILABLE_AP_CNT, // # of AP's decsribed in the AP table + IPW_ORD_AVAILABLE_AP_CNT, // # of AP's described in the AP table IPW_ORD_AP_LIST_PTR, // Ptr to list of available APs IPW_ORD_STAT_AP_ASSNS, // # of associations IPW_ORD_STAT_ASSN_FAIL, // # of association failures -- cgit v1.2.3 From cb746e47837ad0f35c8ae28e9aacc8eb07916d2a Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Thu, 26 Apr 2018 12:16:47 +0200 Subject: brcmfmac: check p2pdev mac address uniqueness The mac address for p2pdev must be different from the primary interface due to firmware requirement. Add an explicit check for this requirement if user-space provides a mac address. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index bcef208a81a5..4b2149b48362 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -2073,6 +2073,13 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, } pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + + /* firmware requires unique mac address for p2pdev interface */ + if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) { + brcmf_err("discovery vif must be different from primary interface\n"); + return ERR_PTR(-EINVAL); + } + brcmf_p2p_generate_bss_mac(p2p, addr); brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr); -- cgit v1.2.3 From 7742fce4c007141617dab9bcb90034b3c0fe2347 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Thu, 26 Apr 2018 12:18:35 +0200 Subject: brcmfmac: reports boottime_ns while informing bss Provides a timestamp in bss information so user space can see when the bss info was updated. Since tsf is not available from the dongle events boottime is reported instead. 
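For context, an abridged sketch of how the timestamp is fed to cfg80211; locals such as channel, rssi_mbm and bssid are illustrative, and the complete conversion is in the diff below:

	struct cfg80211_inform_bss bss_data = {};

	bss_data.chan = channel;
	bss_data.signal = rssi_mbm;
	/* No TSF in the firmware event, so report host boottime instead. */
	bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime());

	bss = cfg80211_inform_bss_data(wiphy, &bss_data,
				       CFG80211_BSS_FTYPE_UNKNOWN, bssid, 0,
				       capability, beacon_interval,
				       ie, ielen, GFP_KERNEL);
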
Reported-by: Dmitry Shmidt Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 89b86251910e..5d891cb4d625 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2728,7 +2728,6 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, struct brcmf_bss_info_le *bi) { struct wiphy *wiphy = cfg_to_wiphy(cfg); - struct ieee80211_channel *notify_channel; struct cfg80211_bss *bss; struct ieee80211_supported_band *band; struct brcmu_chan ch; @@ -2738,7 +2737,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, u16 notify_interval; u8 *notify_ie; size_t notify_ielen; - s32 notify_signal; + struct cfg80211_inform_bss bss_data = { 0 }; if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) { brcmf_err("Bss info is larger than buffer. Discarding\n"); @@ -2758,27 +2757,28 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, band = wiphy->bands[NL80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(channel, band->band); - notify_channel = ieee80211_get_channel(wiphy, freq); + bss_data.chan = ieee80211_get_channel(wiphy, freq); + bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20; + bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime()); notify_capability = le16_to_cpu(bi->capability); notify_interval = le16_to_cpu(bi->beacon_period); notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset); notify_ielen = le32_to_cpu(bi->ie_length); - notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; + bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100; brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID); brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq); brcmf_dbg(CONN, "Capability: %X\n", notify_capability); brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval); - brcmf_dbg(CONN, "Signal: %d\n", notify_signal); - - bss = cfg80211_inform_bss(wiphy, notify_channel, - CFG80211_BSS_FTYPE_UNKNOWN, - (const u8 *)bi->BSSID, - 0, notify_capability, - notify_interval, notify_ie, - notify_ielen, notify_signal, - GFP_KERNEL); + brcmf_dbg(CONN, "Signal: %d\n", bss_data.signal); + + bss = cfg80211_inform_bss_data(wiphy, &bss_data, + CFG80211_BSS_FTYPE_UNKNOWN, + (const u8 *)bi->BSSID, + 0, notify_capability, + notify_interval, notify_ie, + notify_ielen, GFP_KERNEL); if (!bss) return -ENOMEM; -- cgit v1.2.3 From aed14219067ab96e5eeb7730e9bceed10d9be989 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Thu, 26 Apr 2018 12:16:48 +0200 Subject: brcmfmac: use nl80211_band directly to get ieee80211 channel The enum nl80211_band used to retrieve wiphy->bands is the same as wiphy->bands->band which is checked by wiphy_register(). So it can be used directly as parameter of ieee80211_channel_to_frequency(). 
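A compact sketch of the simplification; channel and freq are illustrative locals:

	enum nl80211_band band;
	u32 freq;

	band = channel <= CH_MAX_2G_CHANNEL ? NL80211_BAND_2GHZ :
					      NL80211_BAND_5GHZ;
	/* The enum itself is what the helper expects; there is no need to
	 * look up wiphy->bands[band] only to read back ->band.
	 */
	freq = ieee80211_channel_to_frequency(channel, band);
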
Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5d891cb4d625..ff6b3514c501 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2729,7 +2729,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, { struct wiphy *wiphy = cfg_to_wiphy(cfg); struct cfg80211_bss *bss; - struct ieee80211_supported_band *band; + enum nl80211_band band; struct brcmu_chan ch; u16 channel; u32 freq; @@ -2752,11 +2752,11 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, channel = bi->ctl_ch; if (channel <= CH_MAX_2G_CHANNEL) - band = wiphy->bands[NL80211_BAND_2GHZ]; + band = NL80211_BAND_2GHZ; else - band = wiphy->bands[NL80211_BAND_5GHZ]; + band = NL80211_BAND_5GHZ; - freq = ieee80211_channel_to_frequency(channel, band->band); + freq = ieee80211_channel_to_frequency(channel, band); bss_data.chan = ieee80211_get_channel(wiphy, freq); bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20; bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime()); -- cgit v1.2.3 From ff68c9f9c06d1fd437c8f90fc05ca28c47f7d85e Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Thu, 26 Apr 2018 12:16:49 +0200 Subject: brcmfmac: constify firmware mapping tables The information in the firmware mapping does not need to be modified so it can be static const. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 94e177d7c9b5..9095b830ae4d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -634,7 +634,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, struct brcmf_fw_request * brcmf_fw_alloc_request(u32 chip, u32 chiprev, - struct brcmf_firmware_mapping mapping_table[], + const struct brcmf_firmware_mapping mapping_table[], u32 table_size, struct brcmf_fw_name *fwnames, u32 n_fwnames) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index 79a21095c349..2893e56910f0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -80,7 +80,7 @@ struct brcmf_fw_name { struct brcmf_fw_request * brcmf_fw_alloc_request(u32 chip, u32 chiprev, - struct brcmf_firmware_mapping mapping_table[], + const struct brcmf_firmware_mapping mapping_table[], u32 table_size, struct brcmf_fw_name *fwnames, u32 n_fwnames); diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 6e25ff94dd07..9708647b99e7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -59,7 +59,7 @@ BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie"); BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie"); BRCMF_FW_DEF(4371, "brcmfmac4371-pcie"); -static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { +static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C), diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 1037df7297bb..412a05b9a2b2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -619,7 +619,7 @@ BRCMF_FW_DEF(4354, "brcmfmac4354-sdio"); BRCMF_FW_DEF(4356, "brcmfmac4356-sdio"); BRCMF_FW_DEF(4373, "brcmfmac4373-sdio"); -static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { +static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0), BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x00000020, 43241B4), diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index a0873adcc01c..a4308c6e72d7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -52,7 +52,7 @@ BRCMF_FW_DEF(43242A, "brcmfmac43242a"); BRCMF_FW_DEF(43569, "brcmfmac43569"); BRCMF_FW_DEF(4373, "brcmfmac4373"); -static struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { +static const struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), BRCMF_FW_ENTRY(BRCM_CC_43235_CHIP_ID, 0x00000008, 43236B), BRCMF_FW_ENTRY(BRCM_CC_43236_CHIP_ID, 0x00000008, 43236B), -- cgit v1.2.3 From 84ad327d18debe19b8d509059b61db445d048b02 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Thu, 26 Apr 2018 12:16:50 +0200 Subject: brcmfmac: add hostready indication A hostready signal is introduced to inform firmware through mailbox doorbell1 when common ring initialized or D3 exited. 
Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 9708647b99e7..71b69af9294c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -105,7 +105,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124 -#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140 +#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140 +#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144 #define BRCMF_PCIE2_INTA 0x01 #define BRCMF_PCIE2_INTB 0x02 @@ -140,6 +141,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000 #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000 +#define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000 @@ -782,6 +784,12 @@ static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo) BRCMF_PCIE_MB_INT_FN0_1); } +static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo) +{ + if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1) + brcmf_pcie_write_reg32(devinfo, + BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1); +} static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg) { @@ -924,7 +932,7 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx) brcmf_dbg(PCIE, "RING !\n"); /* Any arbitrary value will do, lets use 1 */ - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1); + brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1); return 0; } @@ -1728,6 +1736,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret, init_waitqueue_head(&devinfo->mbdata_resp_wait); brcmf_pcie_intr_enable(devinfo); + brcmf_pcie_hostready(devinfo); if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0) return; @@ -1950,6 +1959,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev) brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); brcmf_bus_change_state(bus, BRCMF_BUS_UP); brcmf_pcie_intr_enable(devinfo); + brcmf_pcie_hostready(devinfo); return 0; } -- cgit v1.2.3 From f56324baf329bc9362a52ad77a4a1a0f3356d1bc Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Thu, 26 Apr 2018 12:16:51 +0200 Subject: brcmfmac: coarse support for PCIe shared structure rev7 Revision 7 of PCIe dongle interface increases the item size of tx and rx complete rings to accommodate extra payload for new feature. This patch simply bump up the size of these two rings without adding the support for utilizing the new space. This makes brcmfmac compatible with rev7 firmware. 
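In outline, the ring setup now picks the item-size table that matches the shared-structure revision (excerpt-style sketch, names as in the patch):

	const u32 *ring_itemsize_array;

	if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
		ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
	else
		ring_itemsize_array = brcmf_ring_itemsize;

	/* Larger tx/rx complete entries (24/40 bytes) apply only to rev7+. */
	size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
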
Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/msgbuf.h | 6 ++++-- .../wireless/broadcom/brcm80211/brcmfmac/pcie.c | 23 ++++++++++++++++++---- 2 files changed, 23 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h index f93ba6be1ef8..692235d25277 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h @@ -27,8 +27,10 @@ #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40 #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE 32 #define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE 24 -#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE 16 -#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 32 +#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7 16 +#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE 24 +#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7 32 +#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 40 #define BRCMF_H2D_TXFLOWRING_ITEMSIZE 48 struct msgbuf_buf_addr { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 71b69af9294c..f0797aeada67 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -136,8 +136,9 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_PCIE_MB_INT_D2H3_DB0 | \ BRCMF_PCIE_MB_INT_D2H3_DB1) +#define BRCMF_PCIE_SHARED_VERSION_7 7 #define BRCMF_PCIE_MIN_SHARED_VERSION 5 -#define BRCMF_PCIE_MAX_SHARED_VERSION 6 +#define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000 #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000 @@ -318,6 +319,14 @@ static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = { BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM }; +static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = { + BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE, + BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE, + BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE, + BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7, + BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7 +}; + static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = { BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE, BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE, @@ -1007,8 +1016,14 @@ brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id, struct brcmf_pcie_ringbuf *ring; u32 size; u32 addr; + const u32 *ring_itemsize_array; + + if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7) + ring_itemsize_array = brcmf_ring_itemsize_pre_v7; + else + ring_itemsize_array = brcmf_ring_itemsize; - size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id]; + size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id]; dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size, tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET, &dma_handle); @@ -1018,7 +1033,7 @@ brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id, addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET; brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]); addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET; - brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]); + 
brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]); ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) { @@ -1027,7 +1042,7 @@ brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id, return NULL; } brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id], - brcmf_ring_itemsize[ring_id], dma_buf); + ring_itemsize_array[ring_id], dma_buf); ring->dma_handle = dma_handle; ring->devinfo = devinfo; brcmf_commonring_register_cb(&ring->commonring, -- cgit v1.2.3 From 592a4cebc2bccb23880087a21c0626ab7481626d Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Fri, 27 Apr 2018 23:32:39 -0700 Subject: liquidio: Moved common function if_cfg_callback to lio_core.c Moved common function if_cfg_callback to lio_core.c and renamed it to lio_if_cfg_callback. Signed-off-by: Intiyaz Basha Acked-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 32 ++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 35 +--------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 34 +-------------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 4 +++ 4 files changed, 38 insertions(+), 67 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 844e288d60fe..e9532e28ffac 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -29,6 +29,38 @@ /* OOM task polling interval */ #define LIO_OOM_POLL_INTERVAL_MS 250 +/** + * \brief Callback for getting interface configuration + * @param status status of request + * @param buf pointer to resp structure + */ +void lio_if_cfg_callback(struct octeon_device *oct, + u32 status __attribute__((unused)), void *buf) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; + struct liquidio_if_cfg_context *ctx; + struct liquidio_if_cfg_resp *resp; + + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + + oct = lio_get_device(ctx->octeon_id); + if (resp->status) + dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. 
Status: %llx\n", + CVM_CAST64(resp->status)); + WRITE_ONCE(ctx->cond, 1); + + snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", + resp->cfg_info.liquidio_firmware_version); + + /* This barrier is required to be sure that the response has been + * written fully before waking up the handler + */ + wmb(); + + wake_up_interruptible(&ctx->wc); +} + int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) { struct lio *lio = GET_LIO(netdev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index fe3edf13c8f0..4ac3c4b8ce49 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1941,39 +1941,6 @@ static int load_firmware(struct octeon_device *oct) return ret; } -/** - * \brief Callback for getting interface configuration - * @param status status of request - * @param buf pointer to resp structure - */ -static void if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), - void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_resp *resp; - struct liquidio_if_cfg_context *ctx; - - resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n", - CVM_CAST64(resp->status), status); - WRITE_ONCE(ctx->cond, 1); - - snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", - resp->cfg_info.liquidio_firmware_version); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - /** * \brief Poll routine for checking transmit queue status * @param work work_struct data structure @@ -3576,7 +3543,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = if_cfg_callback; + sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; sc->wait_time = 3000; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index b7b91d15ce3f..1d3661b55a25 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1058,38 +1058,6 @@ static void free_netsgbuf_with_resp(void *buf) /* Don't free the skb yet */ } -/** - * \brief Callback for getting interface configuration - * @param status status of request - * @param buf pointer to resp structure - */ -static void if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_context *ctx; - struct liquidio_if_cfg_resp *resp; - - resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. 
Status: %llx\n", - CVM_CAST64(resp->status)); - WRITE_ONCE(ctx->cond, 1); - - snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", - resp->cfg_info.liquidio_firmware_version); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - /** * \brief Net device open for LiquidIO * @param netdev network device @@ -2171,7 +2139,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = if_cfg_callback; + sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; sc->wait_time = 5000; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index d090edd1a4a5..11907e928472 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -199,6 +199,10 @@ int lio_wait_for_clean_oq(struct octeon_device *oct); */ void liquidio_set_ethtool_ops(struct net_device *netdev); +void lio_if_cfg_callback(struct octeon_device *oct, + u32 status __attribute__((unused)), + void *buf); + /** * \brief Net device change_mtu * @param netdev network device -- cgit v1.2.3 From 85a0cd81863469484f22a9b69c7f4440989d32b8 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Fri, 27 Apr 2018 23:32:45 -0700 Subject: liquidio: Moved common function list_delete_head to octeon_network.h Moved common function list_delete_head to octeon_network.h and renamed it to lio_list_delete_head Signed-off-by: Intiyaz Basha Acked-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 23 ++------------------ drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 25 +++------------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 19 ++++++++++++++++ 3 files changed, 24 insertions(+), 43 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 4ac3c4b8ce49..c11724504ab6 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -541,25 +541,6 @@ static inline int check_txq_status(struct lio *lio) return ret_val; } -/** - * Remove the node at the head of the list. The list would be empty at - * the end of this call if there are no more nodes in the list. 
- */ -static inline struct list_head *list_delete_head(struct list_head *root) -{ - struct list_head *node; - - if ((root->prev == root) && (root->next == root)) - node = NULL; - else - node = root->next; - - if (node) - list_del(node); - - return node; -} - /** * \brief Delete gather lists * @param lio per-network private data @@ -578,7 +559,7 @@ static void delete_glists(struct lio *lio) for (i = 0; i < lio->linfo.num_txpciq; i++) { do { g = (struct octnic_gather *) - list_delete_head(&lio->glist[i]); + lio_list_delete_head(&lio->glist[i]); if (g) kfree(g); } while (g); @@ -2590,7 +2571,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) spin_lock(&lio->glist_lock[q_idx]); g = (struct octnic_gather *) - list_delete_head(&lio->glist[q_idx]); + lio_list_delete_head(&lio->glist[q_idx]); spin_unlock(&lio->glist_lock[q_idx]); if (!g) { diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 1d3661b55a25..0d9756177a2a 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -284,25 +284,6 @@ static struct pci_driver liquidio_vf_pci_driver = { .err_handler = &liquidio_vf_err_handler, /* For AER */ }; -/** - * Remove the node at the head of the list. The list would be empty at - * the end of this call if there are no more nodes in the list. - */ -static struct list_head *list_delete_head(struct list_head *root) -{ - struct list_head *node; - - if ((root->prev == root) && (root->next == root)) - node = NULL; - else - node = root->next; - - if (node) - list_del(node); - - return node; -} - /** * \brief Delete gather lists * @param lio per-network private data @@ -321,7 +302,7 @@ static void delete_glists(struct lio *lio) for (i = 0; i < lio->linfo.num_txpciq; i++) { do { g = (struct octnic_gather *) - list_delete_head(&lio->glist[i]); + lio_list_delete_head(&lio->glist[i]); kfree(g); } while (g); @@ -1644,8 +1625,8 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) int i, frags; spin_lock(&lio->glist_lock[q_idx]); - g = (struct octnic_gather *)list_delete_head( - &lio->glist[q_idx]); + g = (struct octnic_gather *) + lio_list_delete_head(&lio->glist[q_idx]); spin_unlock(&lio->glist_lock[q_idx]); if (!g) { diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 11907e928472..1d9392bbe5ef 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -565,4 +565,23 @@ static inline int skb_iq(struct lio *lio, struct sk_buff *skb) return skb->queue_mapping % lio->linfo.num_txpciq; } +/** + * Remove the node at the head of the list. The list would be empty at + * the end of this call if there are no more nodes in the list. + */ +static inline struct list_head *lio_list_delete_head(struct list_head *root) +{ + struct list_head *node; + + if (root->prev == root && root->next == root) + node = NULL; + else + node = root->next; + + if (node) + list_del(node); + + return node; +} + #endif -- cgit v1.2.3 From fd311f1e7548cf45a273d46aa9c9c8d8330d803c Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Fri, 27 Apr 2018 23:32:49 -0700 Subject: liquidio: Moved common function delete_glists to lio_core.c Moved common function delete_glists to lio_core.c and renamed it to lio_delete_glists Signed-off-by: Intiyaz Basha Acked-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 41 ++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 50 ++-------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 49 ++------------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 2 + 4 files changed, 51 insertions(+), 91 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index e9532e28ffac..669b4f2d45e0 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -61,6 +61,47 @@ void lio_if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } +/** + * \brief Delete gather lists + * @param lio per-network private data + */ +void lio_delete_glists(struct lio *lio) +{ + struct octnic_gather *g; + int i; + + kfree(lio->glist_lock); + lio->glist_lock = NULL; + + if (!lio->glist) + return; + + for (i = 0; i < lio->linfo.num_txpciq; i++) { + do { + g = (struct octnic_gather *) + lio_list_delete_head(&lio->glist[i]); + kfree(g); + } while (g); + + if (lio->glists_virt_base && lio->glists_virt_base[i] && + lio->glists_dma_base && lio->glists_dma_base[i]) { + lio_dma_free(lio->oct_dev, + lio->glist_entry_size * lio->tx_qsize, + lio->glists_virt_base[i], + lio->glists_dma_base[i]); + } + } + + kfree(lio->glists_virt_base); + lio->glists_virt_base = NULL; + + kfree(lio->glists_dma_base); + lio->glists_dma_base = NULL; + + kfree(lio->glist); + lio->glist = NULL; +} + int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) { struct lio *lio = GET_LIO(netdev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index c11724504ab6..cb5df7c41e92 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -541,48 +541,6 @@ static inline int check_txq_status(struct lio *lio) return ret_val; } -/** - * \brief Delete gather lists - * @param lio per-network private data - */ -static void delete_glists(struct lio *lio) -{ - struct octnic_gather *g; - int i; - - kfree(lio->glist_lock); - lio->glist_lock = NULL; - - if (!lio->glist) - return; - - for (i = 0; i < lio->linfo.num_txpciq; i++) { - do { - g = (struct octnic_gather *) - lio_list_delete_head(&lio->glist[i]); - if (g) - kfree(g); - } while (g); - - if (lio->glists_virt_base && lio->glists_virt_base[i] && - lio->glists_dma_base && lio->glists_dma_base[i]) { - lio_dma_free(lio->oct_dev, - lio->glist_entry_size * lio->tx_qsize, - lio->glists_virt_base[i], - lio->glists_dma_base[i]); - } - } - - kfree(lio->glists_virt_base); - lio->glists_virt_base = NULL; - - kfree(lio->glists_dma_base); - lio->glists_dma_base = NULL; - - kfree(lio->glist); - lio->glist = NULL; -} - /** * \brief Setup gather lists * @param lio per-network private data @@ -617,7 +575,7 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) GFP_KERNEL); if (!lio->glists_virt_base || !lio->glists_dma_base) { - delete_glists(lio); + lio_delete_glists(lio); return -ENOMEM; } @@ -634,7 +592,7 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) &lio->glists_dma_base[i]); if (!lio->glists_virt_base[i]) { - delete_glists(lio); + lio_delete_glists(lio); return -ENOMEM; } @@ -656,7 +614,7 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) } if (j != lio->tx_qsize) { - delete_glists(lio); + 
lio_delete_glists(lio); return -ENOMEM; } } @@ -1452,7 +1410,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) cleanup_rx_oom_poll_fn(netdev); - delete_glists(lio); + lio_delete_glists(lio); free_netdev(netdev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 0d9756177a2a..ab47fbbe3570 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -284,47 +284,6 @@ static struct pci_driver liquidio_vf_pci_driver = { .err_handler = &liquidio_vf_err_handler, /* For AER */ }; -/** - * \brief Delete gather lists - * @param lio per-network private data - */ -static void delete_glists(struct lio *lio) -{ - struct octnic_gather *g; - int i; - - kfree(lio->glist_lock); - lio->glist_lock = NULL; - - if (!lio->glist) - return; - - for (i = 0; i < lio->linfo.num_txpciq; i++) { - do { - g = (struct octnic_gather *) - lio_list_delete_head(&lio->glist[i]); - kfree(g); - } while (g); - - if (lio->glists_virt_base && lio->glists_virt_base[i] && - lio->glists_dma_base && lio->glists_dma_base[i]) { - lio_dma_free(lio->oct_dev, - lio->glist_entry_size * lio->tx_qsize, - lio->glists_virt_base[i], - lio->glists_dma_base[i]); - } - } - - kfree(lio->glists_virt_base); - lio->glists_virt_base = NULL; - - kfree(lio->glists_dma_base); - lio->glists_dma_base = NULL; - - kfree(lio->glist); - lio->glist = NULL; -} - /** * \brief Setup gather lists * @param lio per-network private data @@ -359,7 +318,7 @@ static int setup_glists(struct lio *lio, int num_iqs) GFP_KERNEL); if (!lio->glists_virt_base || !lio->glists_dma_base) { - delete_glists(lio); + lio_delete_glists(lio); return -ENOMEM; } @@ -374,7 +333,7 @@ static int setup_glists(struct lio *lio, int num_iqs) &lio->glists_dma_base[i]); if (!lio->glists_virt_base[i]) { - delete_glists(lio); + lio_delete_glists(lio); return -ENOMEM; } @@ -393,7 +352,7 @@ static int setup_glists(struct lio *lio, int num_iqs) } if (j != lio->tx_qsize) { - delete_glists(lio); + lio_delete_glists(lio); return -ENOMEM; } } @@ -837,7 +796,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) cleanup_link_status_change_wq(netdev); - delete_glists(lio); + lio_delete_glists(lio); free_netdev(netdev); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 1d9392bbe5ef..777af06a8570 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -203,6 +203,8 @@ void lio_if_cfg_callback(struct octeon_device *oct, u32 status __attribute__((unused)), void *buf); +void lio_delete_glists(struct lio *lio); + /** * \brief Net device change_mtu * @param netdev network device -- cgit v1.2.3 From a72b2c8ced35315e4f0fdd6f2c4c12f96bb0dc2e Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Fri, 27 Apr 2018 23:32:51 -0700 Subject: liquidio: Moved common definition octnic_gather to octeon_network.h Moving common definition octnic_gather to octeon_network.h Signed-off-by: Intiyaz Basha Acked-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 21 --------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 18 ------------------ .../net/ethernet/cavium/liquidio/octeon_network.h | 21 +++++++++++++++++++++ 3 files changed, 21 insertions(+), 39 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index cb5df7c41e92..00d5634c7668 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -144,27 +144,6 @@ union tx_info { #define OCTNIC_GSO_MAX_SIZE \ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) -/** Structure of a node in list of gather components maintained by - * NIC driver for each network device. - */ -struct octnic_gather { - /** List manipulation. Next and prev pointers. */ - struct list_head list; - - /** Size of the gather component at sg in bytes. */ - int sg_size; - - /** Number of bytes that sg was adjusted to make it 8B-aligned. */ - int adjust; - - /** Gather component that can accommodate max sized fragment list - * received from the IP layer. - */ - struct octeon_sg_entry *sg; - - dma_addr_t sg_dma_ptr; -}; - struct handshake { struct completion init; struct completion started; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index ab47fbbe3570..c4ba7ee9d4f0 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -75,24 +75,6 @@ union tx_info { #define OCTNIC_GSO_MAX_SIZE \ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) -struct octnic_gather { - /* List manipulation. Next and prev pointers. */ - struct list_head list; - - /* Size of the gather component at sg in bytes. */ - int sg_size; - - /* Number of bytes that sg was adjusted to make it 8B-aligned. */ - int adjust; - - /* Gather component that can accommodate max sized fragment list - * received from the IP layer. - */ - struct octeon_sg_entry *sg; - - dma_addr_t sg_dma_ptr; -}; - static int liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void liquidio_vf_remove(struct pci_dev *pdev); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 777af06a8570..caf25b88cb53 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -47,6 +47,27 @@ struct liquidio_if_cfg_resp { u64 status; }; +/* Structure of a node in list of gather components maintained by + * NIC driver for each network device. + */ +struct octnic_gather { + /* List manipulation. Next and prev pointers. */ + struct list_head list; + + /* Size of the gather component at sg in bytes. */ + int sg_size; + + /* Number of bytes that sg was adjusted to make it 8B-aligned. */ + int adjust; + + /* Gather component that can accommodate max sized fragment list + * received from the IP layer. 
+ */ + struct octeon_sg_entry *sg; + + dma_addr_t sg_dma_ptr; +}; + struct oct_nic_stats_resp { u64 rh; struct oct_link_stats stats; -- cgit v1.2.3 From 128ea39439341d4f60bda1740a59ce34bcc19e4c Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Fri, 27 Apr 2018 23:32:55 -0700 Subject: liquidio: Moved common function setup_glists to lio_core.c Moved common function setup_glists to lio_core.c and renamed it to lio_setup_glists Signed-off-by: Intiyaz Basha Acked-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 83 +++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 85 +--------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 80 +------------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 2 + 4 files changed, 87 insertions(+), 163 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 669b4f2d45e0..b805d54d8e00 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -29,6 +29,8 @@ /* OOM task polling interval */ #define LIO_OOM_POLL_INTERVAL_MS 250 +#define OCTNIC_MAX_SG MAX_SKB_FRAGS + /** * \brief Callback for getting interface configuration * @param status status of request @@ -102,6 +104,87 @@ void lio_delete_glists(struct lio *lio) lio->glist = NULL; } +/** + * \brief Setup gather lists + * @param lio per-network private data + */ +int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) +{ + struct octnic_gather *g; + int i, j; + + lio->glist_lock = + kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL); + if (!lio->glist_lock) + return -ENOMEM; + + lio->glist = + kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL); + if (!lio->glist) { + kfree(lio->glist_lock); + lio->glist_lock = NULL; + return -ENOMEM; + } + + lio->glist_entry_size = + ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); + + /* allocate memory to store virtual and dma base address of + * per glist consistent memory + */ + lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), + GFP_KERNEL); + lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), + GFP_KERNEL); + + if (!lio->glists_virt_base || !lio->glists_dma_base) { + lio_delete_glists(lio); + return -ENOMEM; + } + + for (i = 0; i < num_iqs; i++) { + int numa_node = dev_to_node(&oct->pci_dev->dev); + + spin_lock_init(&lio->glist_lock[i]); + + INIT_LIST_HEAD(&lio->glist[i]); + + lio->glists_virt_base[i] = + lio_dma_alloc(oct, + lio->glist_entry_size * lio->tx_qsize, + &lio->glists_dma_base[i]); + + if (!lio->glists_virt_base[i]) { + lio_delete_glists(lio); + return -ENOMEM; + } + + for (j = 0; j < lio->tx_qsize; j++) { + g = kzalloc_node(sizeof(*g), GFP_KERNEL, + numa_node); + if (!g) + g = kzalloc(sizeof(*g), GFP_KERNEL); + if (!g) + break; + + g->sg = lio->glists_virt_base[i] + + (j * lio->glist_entry_size); + + g->sg_dma_ptr = lio->glists_dma_base[i] + + (j * lio->glist_entry_size); + + list_add_tail(&g->list, &lio->glist[i]); + } + + if (j != lio->tx_qsize) { + lio_delete_glists(lio); + return -ENOMEM; + } + } + + return 0; +} + int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) { struct lio *lio = GET_LIO(netdev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 00d5634c7668..4b8e29f4e621 100644 ---
a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -138,8 +138,6 @@ union tx_info { * by this structure in the NIC module. */ -#define OCTNIC_MAX_SG (MAX_SKB_FRAGS) - #define OCTNIC_GSO_MAX_HEADER_SIZE 128 #define OCTNIC_GSO_MAX_SIZE \ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) @@ -520,87 +518,6 @@ static inline int check_txq_status(struct lio *lio) return ret_val; } -/** - * \brief Setup gather lists - * @param lio per-network private data - */ -static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) -{ - int i, j; - struct octnic_gather *g; - - lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock), - GFP_KERNEL); - if (!lio->glist_lock) - return -ENOMEM; - - lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), - GFP_KERNEL); - if (!lio->glist) { - kfree(lio->glist_lock); - lio->glist_lock = NULL; - return -ENOMEM; - } - - lio->glist_entry_size = - ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); - - /* allocate memory to store virtual and dma base address of - * per glist consistent memory - */ - lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), - GFP_KERNEL); - lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), - GFP_KERNEL); - - if (!lio->glists_virt_base || !lio->glists_dma_base) { - lio_delete_glists(lio); - return -ENOMEM; - } - - for (i = 0; i < num_iqs; i++) { - int numa_node = dev_to_node(&oct->pci_dev->dev); - - spin_lock_init(&lio->glist_lock[i]); - - INIT_LIST_HEAD(&lio->glist[i]); - - lio->glists_virt_base[i] = - lio_dma_alloc(oct, - lio->glist_entry_size * lio->tx_qsize, - &lio->glists_dma_base[i]); - - if (!lio->glists_virt_base[i]) { - lio_delete_glists(lio); - return -ENOMEM; - } - - for (j = 0; j < lio->tx_qsize; j++) { - g = kzalloc_node(sizeof(*g), GFP_KERNEL, - numa_node); - if (!g) - g = kzalloc(sizeof(*g), GFP_KERNEL); - if (!g) - break; - - g->sg = lio->glists_virt_base[i] + - (j * lio->glist_entry_size); - - g->sg_dma_ptr = lio->glists_dma_base[i] + - (j * lio->glist_entry_size); - - list_add_tail(&g->list, &lio->glist[i]); - } - - if (j != lio->tx_qsize) { - lio_delete_glists(lio); - return -ENOMEM; - } - } - - return 0; -} - /** * \brief Print link information * @param netdev network device @@ -3657,7 +3574,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); - if (setup_glists(octeon_dev, lio, num_iqueues)) { + if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index c4ba7ee9d4f0..be28b8f1f67d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -69,8 +69,6 @@ union tx_info { } s; }; -#define OCTNIC_MAX_SG (MAX_SKB_FRAGS) - #define OCTNIC_GSO_MAX_HEADER_SIZE 128 #define OCTNIC_GSO_MAX_SIZE \ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) @@ -266,82 +264,6 @@ static struct pci_driver liquidio_vf_pci_driver = { .err_handler = &liquidio_vf_err_handler, /* For AER */ }; -/** - * \brief Setup gather lists - * @param lio per-network private data - */ -static int setup_glists(struct lio *lio, int num_iqs) -{ - struct octnic_gather *g; - int i, j; - - lio->glist_lock = - 
kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); - if (!lio->glist_lock) - return -ENOMEM; - - lio->glist = - kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); - if (!lio->glist) { - kfree(lio->glist_lock); - lio->glist_lock = NULL; - return -ENOMEM; - } - - lio->glist_entry_size = - ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); - - /* allocate memory to store virtual and dma base address of - * per glist consistent memory - */ - lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), - GFP_KERNEL); - lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), - GFP_KERNEL); - - if (!lio->glists_virt_base || !lio->glists_dma_base) { - lio_delete_glists(lio); - return -ENOMEM; - } - - for (i = 0; i < num_iqs; i++) { - spin_lock_init(&lio->glist_lock[i]); - - INIT_LIST_HEAD(&lio->glist[i]); - - lio->glists_virt_base[i] = - lio_dma_alloc(lio->oct_dev, - lio->glist_entry_size * lio->tx_qsize, - &lio->glists_dma_base[i]); - - if (!lio->glists_virt_base[i]) { - lio_delete_glists(lio); - return -ENOMEM; - } - - for (j = 0; j < lio->tx_qsize; j++) { - g = kzalloc(sizeof(*g), GFP_KERNEL); - if (!g) - break; - - g->sg = lio->glists_virt_base[i] + - (j * lio->glist_entry_size); - - g->sg_dma_ptr = lio->glists_dma_base[i] + - (j * lio->glist_entry_size); - - list_add_tail(&g->list, &lio->glist[i]); - } - - if (j != lio->tx_qsize) { - lio_delete_glists(lio); - return -ENOMEM; - } - } - - return 0; -} - /** * \brief Print link information * @param netdev network device @@ -2226,7 +2148,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); - if (setup_glists(lio, num_iqueues)) { + if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index caf25b88cb53..f8c1091639ae 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -226,6 +226,8 @@ void lio_if_cfg_callback(struct octeon_device *oct, void lio_delete_glists(struct lio *lio); +int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs); + /** * \brief Net device change_mtu * @param netdev network device -- cgit v1.2.3 From c33c997346c34ea7b89aec99524ad9632a2f1e0c Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Fri, 27 Apr 2018 23:32:57 -0700 Subject: liquidio: enhanced ethtool --set-channels feature Enhancing driver to accept max supported queues for ethtool --set-channels Signed-off-by: Intiyaz Basha Acked-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- .../ethernet/cavium/liquidio/cn23xx_pf_device.c | 6 +- .../ethernet/cavium/liquidio/cn23xx_pf_device.h | 2 + drivers/net/ethernet/cavium/liquidio/lio_core.c | 4 +- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 263 +++++++++++++++++++-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 64 +++-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 8 +- .../net/ethernet/cavium/liquidio/liquidio_common.h | 1 + .../net/ethernet/cavium/liquidio/octeon_device.c | 12 +- .../net/ethernet/cavium/liquidio/octeon_device.h | 2 +- .../net/ethernet/cavium/liquidio/octeon_network.h | 12 +- 10 files changed, 316 insertions(+), 58 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index bc9861c90ea3..929d485a3a2f 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1245,7 +1245,7 @@ static void cn23xx_setup_reg_address(struct octeon_device *oct) CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); } -static int cn23xx_sriov_config(struct octeon_device *oct) +int cn23xx_sriov_config(struct octeon_device *oct) { struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; u32 max_rings, total_rings, max_vfs, rings_per_vf; @@ -1269,8 +1269,8 @@ static int cn23xx_sriov_config(struct octeon_device *oct) break; } - if (max_rings <= num_present_cpus()) - num_pf_rings = 1; + if (oct->sriov_info.num_pf_rings) + num_pf_rings = oct->sriov_info.num_pf_rings; else num_pf_rings = num_present_cpus(); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index 63b3de4f2bfe..e6f31d0d5c0b 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -61,6 +61,8 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct); +int cn23xx_sriov_config(struct octeon_device *oct); + int cn23xx_fw_loaded(struct octeon_device *oct); void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index b805d54d8e00..6821afcdc365 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -78,7 +78,7 @@ void lio_delete_glists(struct lio *lio) if (!lio->glist) return; - for (i = 0; i < lio->linfo.num_txpciq; i++) { + for (i = 0; i < lio->oct_dev->num_iqs; i++) { do { g = (struct octnic_gather *) lio_list_delete_head(&lio->glist[i]); @@ -1036,8 +1036,8 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) int num_ioq_vectors; int irqret, err; - oct->num_msix_irqs = num_ioqs; if (oct->msix_on) { + oct->num_msix_irqs = num_ioqs; if (OCTEON_CN23XX_PF(oct)) { num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 64c817a63a01..fff60ca20c35 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -361,7 +361,14 @@ lio_ethtool_get_channels(struct net_device *dev, rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - 
max_combined = lio->linfo.num_txpciq; + if (oct->sriov_info.sriov_enabled) { + max_combined = lio->linfo.num_txpciq; + } else { + struct octeon_config *conf23_pf = + CHIP_CONF(oct, cn23xx_pf); + + max_combined = CFG_GET_IQ_MAX_Q(conf23_pf); + } combined_count = oct->num_iqs; } else if (OCTEON_CN23XX_VF(oct)) { u64 reg_val = 0ULL; @@ -425,9 +432,15 @@ lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) kfree(oct->irq_name_storage); oct->irq_name_storage = NULL; + + if (octeon_allocate_ioq_vector(oct, num_ioqs)) { + dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); + return -1; + } + if (octeon_setup_interrupt(oct, num_ioqs)) { dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); - return 1; + return -1; } /* Enable Octeon device interrupts */ @@ -457,7 +470,16 @@ lio_ethtool_set_channels(struct net_device *dev, combined_count = channel->combined_count; if (OCTEON_CN23XX_PF(oct)) { - max_combined = channel->max_combined; + if (oct->sriov_info.sriov_enabled) { + max_combined = lio->linfo.num_txpciq; + } else { + struct octeon_config *conf23_pf = + CHIP_CONF(oct, + cn23xx_pf); + + max_combined = + CFG_GET_IQ_MAX_Q(conf23_pf); + } } else if (OCTEON_CN23XX_VF(oct)) { u64 reg_val = 0ULL; u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); @@ -485,7 +507,6 @@ lio_ethtool_set_channels(struct net_device *dev, if (lio_reset_queues(dev, combined_count)) return -EINVAL; - lio_irq_reallocate_irqs(oct, combined_count); if (stopped) dev->netdev_ops->ndo_open(dev); @@ -824,12 +845,120 @@ lio_ethtool_get_ringparam(struct net_device *netdev, ering->rx_jumbo_max_pending = 0; } +static int lio_23xx_reconfigure_queue_count(struct lio *lio) +{ + struct octeon_device *oct = lio->oct_dev; + struct liquidio_if_cfg_context *ctx; + u32 resp_size, ctx_size, data_size; + struct liquidio_if_cfg_resp *resp; + struct octeon_soft_command *sc; + union oct_nic_if_cfg if_cfg; + struct lio_version *vdata; + u32 ifidx_or_pfnum; + int retval; + int j; + + resp_size = sizeof(struct liquidio_if_cfg_resp); + ctx_size = sizeof(struct liquidio_if_cfg_context); + data_size = sizeof(struct lio_version); + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, data_size, + resp_size, ctx_size); + if (!sc) { + dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", + __func__); + return -1; + } + + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + vdata = (struct lio_version *)sc->virtdptr; + + vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); + vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); + vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); + + ifidx_or_pfnum = oct->pf_num; + WRITE_ONCE(ctx->cond, 0); + ctx->octeon_id = lio_get_device_id(oct); + init_waitqueue_head(&ctx->wc); + + if_cfg.u64 = 0; + if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings; + if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings; + if_cfg.s.base_queue = oct->sriov_info.pf_srn; + if_cfg.s.gmx_port_id = oct->pf_num; + + sc->iq_no = 0; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_QCOUNT_UPDATE, 0, + if_cfg.u64, 0); + sc->callback = lio_if_cfg_callback; + sc->callback_arg = sc; + sc->wait_time = LIO_IFCFG_WAIT_TIME; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_err(&oct->pci_dev->dev, + "iq/oq config failed status: %x\n", + retval); + goto qcount_update_fail; + } + + if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { + 
dev_err(&oct->pci_dev->dev, "Wait interrupted\n"); + return -1; + } + + retval = resp->status; + if (retval) { + dev_err(&oct->pci_dev->dev, "iq/oq config failed\n"); + goto qcount_update_fail; + } + + octeon_swap_8B_data((u64 *)(&resp->cfg_info), + (sizeof(struct liquidio_if_cfg_info)) >> 3); + + lio->ifidx = ifidx_or_pfnum; + lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask); + lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask); + for (j = 0; j < lio->linfo.num_rxpciq; j++) { + lio->linfo.rxpciq[j].u64 = + resp->cfg_info.linfo.rxpciq[j].u64; + } + + for (j = 0; j < lio->linfo.num_txpciq; j++) { + lio->linfo.txpciq[j].u64 = + resp->cfg_info.linfo.txpciq[j].u64; + } + + lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; + lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; + lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + lio->txq = lio->linfo.txpciq[0].s.q_no; + lio->rxq = lio->linfo.rxpciq[0].s.q_no; + + octeon_free_soft_command(oct, sc); + dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", + lio->linfo.num_rxpciq); + + return 0; + +qcount_update_fail: + octeon_free_soft_command(oct, sc); + + return -1; +} + static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + int i, queue_count_update = 0; struct napi_struct *napi, *n; - int i, update = 0; + int ret; + + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); if (wait_for_pending_requests(oct)) dev_err(&oct->pci_dev->dev, "There were pending requests\n"); @@ -838,7 +967,7 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); if (octeon_set_io_queues_off(oct)) { - dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); + dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n"); return -1; } @@ -851,9 +980,40 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) netif_napi_del(napi); if (num_qs != oct->num_iqs) { - netif_set_real_num_rx_queues(netdev, num_qs); - netif_set_real_num_tx_queues(netdev, num_qs); - update = 1; + ret = netif_set_real_num_rx_queues(netdev, num_qs); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Setting real number rx failed\n"); + return ret; + } + + ret = netif_set_real_num_tx_queues(netdev, num_qs); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Setting real number tx failed\n"); + return ret; + } + + /* The value of queue_count_update decides whether it is the + * queue count or the descriptor count that is being + * re-configured. + */ + queue_count_update = 1; + } + + /* Re-configuration of queues can happen in two scenarios, SRIOV enabled + * and SRIOV disabled. Few things like recreating queue zero, resetting + * glists and IRQs are required for both. For the latter, some more + * steps like updating sriov_info for the octeon device need to be done. + */ + if (queue_count_update) { + lio_delete_glists(lio); + + /* Delete mbox for PF which is SRIOV disabled because sriov_info + * will be now changed. 
+ */ + if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled) + oct->fn_list.free_mbox(oct); } for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { @@ -868,24 +1028,91 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) octeon_delete_instr_queue(oct, i); } + if (queue_count_update) { + /* For PF re-configure sriov related information */ + if ((OCTEON_CN23XX_PF(oct)) && + !oct->sriov_info.sriov_enabled) { + oct->sriov_info.num_pf_rings = num_qs; + if (cn23xx_sriov_config(oct)) { + dev_err(&oct->pci_dev->dev, + "Queue reset aborted: SRIOV config failed\n"); + return -1; + } + + num_qs = oct->sriov_info.num_pf_rings; + } + } + if (oct->fn_list.setup_device_regs(oct)) { dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); return -1; } - if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { - dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n"); - return -1; + /* The following are needed in case of queue count re-configuration and + * not for descriptor count re-configuration. + */ + if (queue_count_update) { + if (octeon_setup_instr_queues(oct)) + return -1; + + if (octeon_setup_output_queues(oct)) + return -1; + + /* Recreating mbox for PF that is SRIOV disabled */ + if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { + if (oct->fn_list.setup_mbox(oct)) { + dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); + return -1; + } + } + + /* Deleting and recreating IRQs whether the interface is SRIOV + * enabled or disabled. + */ + if (lio_irq_reallocate_irqs(oct, num_qs)) { + dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n"); + return -1; + } + + /* Enable the input and output queues for this Octeon device */ + if (oct->fn_list.enable_io_queues(oct)) { + dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n"); + return -1; + } + + for (i = 0; i < oct->num_oqs; i++) + writel(oct->droq[i]->max_count, + oct->droq[i]->pkts_credit_reg); + + /* Informing firmware about the new queue count. It is required + * for firmware to allocate more number of queues than those at + * load time. + */ + if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { + if (lio_23xx_reconfigure_queue_count(lio)) + return -1; + } } - /* Enable the input and output queues for this Octeon device */ - if (oct->fn_list.enable_io_queues(oct)) { - dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues"); + /* Once firmware is aware of the new value, queues can be recreated */ + if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { + dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n"); return -1; } - if (update && lio_send_queue_count_update(netdev, num_qs)) - return -1; + if (queue_count_update) { + if (lio_setup_glists(oct, lio, num_qs)) { + dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n"); + return -1; + } + + /* Send firmware the information about new number of queues + * if the interface is a VF or a PF that is SRIOV enabled. 
+ */ + if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct)) + if (lio_send_queue_count_update(netdev, num_qs)) + return -1; + } return 0; } @@ -930,7 +1157,7 @@ static int lio_ethtool_set_ringparam(struct net_device *netdev, CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, rx_count); - if (lio_reset_queues(netdev, lio->linfo.num_txpciq)) + if (lio_reset_queues(netdev, oct->num_iqs)) goto err_lio_reset_queues; if (stopped) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 4b8e29f4e621..ee75048b3937 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -497,7 +497,7 @@ static void liquidio_deinit_pci(void) */ static inline int check_txq_status(struct lio *lio) { - int numqs = lio->netdev->num_tx_queues; + int numqs = lio->netdev->real_num_tx_queues; int ret_val = 0; int q, iq; @@ -1521,7 +1521,7 @@ static void free_netsgbuf(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); spin_unlock(&lio->glist_lock[iq]); @@ -1564,7 +1564,7 @@ static void free_netsgbuf_with_resp(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -1851,11 +1851,6 @@ static int liquidio_open(struct net_device *netdev) ifstate_set(lio, LIO_IFSTATE_RUNNING); - /* Ready for link status updates */ - lio->intf_open = 1; - - netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); - if (OCTEON_CN23XX_PF(oct)) { if (!oct->msix_on) if (setup_tx_poll_fn(netdev)) @@ -1865,7 +1860,12 @@ static int liquidio_open(struct net_device *netdev) return -1; } - start_txqs(netdev); + netif_tx_start_all_queues(netdev); + + /* Ready for link status updates */ + lio->intf_open = 1; + + netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -1888,11 +1888,15 @@ static int liquidio_stop(struct net_device *netdev) ifstate_reset(lio, LIO_IFSTATE_RUNNING); - netif_tx_disable(netdev); + /* Stop any link updates */ + lio->intf_open = 0; + + stop_txqs(netdev); /* Inform that netif carrier is down */ netif_carrier_off(netdev); - lio->intf_open = 0; + netif_tx_disable(netdev); + lio->linfo.link.s.link_up = 0; lio->link_changes++; @@ -2332,7 +2336,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) lio = GET_LIO(netdev); oct = lio->oct_dev; - q_idx = skb_iq(lio, skb); + q_idx = skb_iq(oct, skb); tag = q_idx; iq_no = lio->linfo.txpciq[q_idx].s.q_no; @@ -3298,6 +3302,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) struct liquidio_if_cfg_resp *resp; struct octdev_props *props; int retval, num_iqueues, num_oqueues; + int max_num_queues = 0; union oct_nic_if_cfg if_cfg; unsigned int base_queue; unsigned int gmx_port_id; @@ -3380,7 +3385,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; - sc->wait_time = 3000; + sc->wait_time = LIO_IFCFG_WAIT_TIME; retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -3434,11 +3439,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) resp->cfg_info.oqmask); goto setup_nic_dev_fail; } + + if (OCTEON_CN6XXX(octeon_dev)) { + max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, + cn6xxx)); + } 
else if (OCTEON_CN23XX_PF(octeon_dev)) { + max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, + cn23xx_pf)); + } + dev_dbg(&octeon_dev->pci_dev->dev, - "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", + "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n", i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, - num_iqueues, num_oqueues); - netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); + num_iqueues, num_oqueues, max_num_queues); + netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); if (!netdev) { dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); @@ -3453,6 +3467,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->netdev_ops = &lionetdevops; SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops); + retval = netif_set_real_num_rx_queues(netdev, num_oqueues); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "setting real number rx failed\n"); + goto setup_nic_dev_fail; + } + + retval = netif_set_real_num_tx_queues(netdev, num_iqueues); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "setting real number tx failed\n"); + goto setup_nic_dev_fail; + } + lio = GET_LIO(netdev); memset(lio, 0, sizeof(struct lio)); @@ -4073,7 +4101,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev) } atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); - if (octeon_allocate_ioq_vector(octeon_dev)) { + if (octeon_allocate_ioq_vector + (octeon_dev, + octeon_dev->sriov_info.num_pf_rings)) { dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); return 1; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index be28b8f1f67d..08b682b1c8ad 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -849,7 +849,7 @@ static void free_netsgbuf(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -893,7 +893,7 @@ static void free_netsgbuf_with_resp(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -1407,7 +1407,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) lio = GET_LIO(netdev); oct = lio->oct_dev; - q_idx = skb_iq(lio, skb); + q_idx = skb_iq(lio->oct_dev, skb); tag = q_idx; iq_no = lio->linfo.txpciq[q_idx].s.q_no; @@ -2339,7 +2339,7 @@ static int octeon_device_init(struct octeon_device *oct) } atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); - if (octeon_allocate_ioq_vector(oct)) { + if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) { dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); return 1; } diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 2166744b6812..026570499dcf 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -84,6 +84,7 @@ enum octeon_tag_type { #define OPCODE_NIC_IF_CFG 0x09 #define OPCODE_NIC_VF_DRV_NOTICE 0x0A #define OPCODE_NIC_INTRMOD_PARAMS 0x0B +#define OPCODE_NIC_QCOUNT_UPDATE 0x12 #define OPCODE_NIC_SET_TRUSTED_VF 0x13 #define OPCODE_NIC_SYNC_OCTEON_TIME 0x14 #define VF_DRV_LOADED 1 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c 
b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index f38abf626412..f878a552fef3 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -824,23 +824,18 @@ int octeon_deregister_device(struct octeon_device *oct) } int -octeon_allocate_ioq_vector(struct octeon_device *oct) +octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs) { - int i, num_ioqs = 0; struct octeon_ioq_vector *ioq_vector; int cpu_num; int size; - - if (OCTEON_CN23XX_PF(oct)) - num_ioqs = oct->sriov_info.num_pf_rings; - else if (OCTEON_CN23XX_VF(oct)) - num_ioqs = oct->sriov_info.rings_per_vf; + int i; size = sizeof(struct octeon_ioq_vector) * num_ioqs; oct->ioq_vector = vzalloc(size); if (!oct->ioq_vector) - return 1; + return -1; for (i = 0; i < num_ioqs; i++) { ioq_vector = &oct->ioq_vector[i]; ioq_vector->oct_dev = oct; @@ -856,6 +851,7 @@ octeon_allocate_ioq_vector(struct octeon_device *oct) else ioq_vector->ioq_num = i; } + return 0; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 91937cc5c1d7..9430c0a0bb8c 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -867,7 +867,7 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type); struct octeon_config *octeon_get_conf(struct octeon_device *oct); void octeon_free_ioq_vector(struct octeon_device *oct); -int octeon_allocate_ioq_vector(struct octeon_device *oct); +int octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs); void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq); /* LiquidIO driver pivate flags */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index f8c1091639ae..8571f11e3c8f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -47,6 +47,8 @@ struct liquidio_if_cfg_resp { u64 status; }; +#define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */ + /* Structure of a node in list of gather components maintained by * NIC driver for each network device. 
*/ @@ -546,7 +548,7 @@ static inline void stop_txqs(struct net_device *netdev) { int i; - for (i = 0; i < netdev->num_tx_queues; i++) + for (i = 0; i < netdev->real_num_tx_queues; i++) netif_stop_subqueue(netdev, i); } @@ -559,7 +561,7 @@ static inline void wake_txqs(struct net_device *netdev) struct lio *lio = GET_LIO(netdev); int i, qno; - for (i = 0; i < netdev->num_tx_queues; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no; if (__netif_subqueue_stopped(netdev, i)) { @@ -580,14 +582,14 @@ static inline void start_txqs(struct net_device *netdev) int i; if (lio->linfo.link.s.link_up) { - for (i = 0; i < netdev->num_tx_queues; i++) + for (i = 0; i < netdev->real_num_tx_queues; i++) netif_start_subqueue(netdev, i); } } -static inline int skb_iq(struct lio *lio, struct sk_buff *skb) +static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb) { - return skb->queue_mapping % lio->linfo.num_txpciq; + return skb->queue_mapping % oct->num_iqs; } /** -- cgit v1.2.3 From 76c2a96d42ca3bdac12c463ff27fec3bb2982e3f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 28 Apr 2018 10:52:16 +0100 Subject: liquidio: fix spelling mistake: "mac_tx_multi_collison" -> "mac_tx_multi_collision" Trivial fix to spelling mistake in oct_stats_strings text Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index fff60ca20c35..a1d84304a608 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -122,7 +122,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "mac_tx_ctl_packets", "mac_tx_total_collisions", "mac_tx_one_collision", - "mac_tx_multi_collison", + "mac_tx_multi_collision", "mac_tx_max_collision_fail", "mac_tx_max_deferal_fail", "mac_tx_fifo_err", -- cgit v1.2.3 From e66267483eab88a05103f2b9df6a5682b87f38b0 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 28 Apr 2018 22:19:08 +0200 Subject: r8169: remove unneeded call to __rtl8169_set_features in rtl_open RxChkSum and RxVlan aren't touched outside __rtl8169_set_features (except in probe), so they are always in sync with dev->features. And the RxConfig flags are set in rtl_set_rx_mode() which is called via dev_set_rx_mode() from __dev_open(). Therefore we can safely remove this call. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a5d00ee94245..d2656224fc49 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7637,8 +7637,6 @@ static int rtl_open(struct net_device *dev) rtl8169_init_phy(dev, tp); - __rtl8169_set_features(dev, dev->features); - rtl_pll_power_up(tp); rtl_hw_start(tp); -- cgit v1.2.3 From a3984578bf0ed578085bd3c382867c0121bb1534 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 28 Apr 2018 22:19:15 +0200 Subject: r8169: improve rtl8169_set_features __rtl8169_set_features is used in rtl8169_set_features only, so we can inline it. 
In addition: - Remove check (features ^ dev->features), __netdev_update_features checks already that requested features differ from current ones. - Don't mask out unsupported flags, there's no benefit in it. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index d2656224fc49..411d12bee50c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1935,12 +1935,14 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev, return features; } -static void __rtl8169_set_features(struct net_device *dev, - netdev_features_t features) +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); u32 rx_config; + rtl_lock_work(tp); + rx_config = RTL_R32(tp, RxConfig); if (features & NETIF_F_RXALL) rx_config |= (AcceptErr | AcceptRunt); @@ -1963,24 +1965,12 @@ static void __rtl8169_set_features(struct net_device *dev, RTL_W16(tp, CPlusCmd, tp->cp_cmd); RTL_R16(tp, CPlusCmd); -} -static int rtl8169_set_features(struct net_device *dev, - netdev_features_t features) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; - - rtl_lock_work(tp); - if (features ^ dev->features) - __rtl8169_set_features(dev, features); rtl_unlock_work(tp); return 0; } - static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) { return (skb_vlan_tag_present(skb)) ? -- cgit v1.2.3 From 9a3c81fa616ba5dc10eec59017ab1f7f0cc35ee9 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 28 Apr 2018 22:19:21 +0200 Subject: r8169: replace magic number for INTT mask with a constant Use a proper constant for INTT bit mask. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 411d12bee50c..1dd189ddec3e 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -599,6 +599,7 @@ enum rtl_register_content { RxChkSum = (1 << 5), PCIDAC = (1 << 4), PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) INTT_0 = 0x0000, // 8168 INTT_1 = 0x0001, // 8168 INTT_2 = 0x0002, // 8168 @@ -2344,7 +2345,7 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) if (IS_ERR(ci)) return PTR_ERR(ci); - scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & 3]; + scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & INTT_MASK]; /* read IntrMitigate and adjust according to scale */ for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { @@ -2443,7 +2444,7 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) RTL_W16(tp, IntrMitigate, swab16(w)); - tp->cp_cmd = (tp->cp_cmd & ~3) | cp01; + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; RTL_W16(tp, CPlusCmd, tp->cp_cmd); RTL_R16(tp, CPlusCmd); -- cgit v1.2.3 From 0ae0974eb3d08e2166dd83aa669b63cb84a5ced3 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 28 Apr 2018 22:19:26 +0200 Subject: r8169: improve CPlusCmd handling tp->cp_cmd is supposed to reflect the current value of the CPlusCmd register.
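For illustration (a sketch of the intended idiom, not taken from the patch; SOME_CPCMD_BITS is a placeholder, not a real flag), updates are meant to go through the cached copy first and then be written back to the chip:

	tp->cp_cmd &= ~SOME_CPCMD_BITS;		/* update the cached copy */
	RTL_W16(tp, CPlusCmd, tp->cp_cmd);	/* write the cached value to the register */
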
Several (quite old) changes however directly change this register w/o updating tp->cp_cmd. Also we have places in the code reading this register where we could use the cached value. In addition: - Properly initialize tp->cmd with the register value. - In rtl_hw_start_8169 remove one setting of PCIMulRW because it's set unconditionally anyway a few lines later. - In rtl_hw_start_8168 properly mask out the INTT bits before setting INTT_1. So far we rely on both bits being zero. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 42 +++++++++++++++--------------------- 1 file changed, 17 insertions(+), 25 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 1dd189ddec3e..868dee7d1131 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1962,8 +1962,6 @@ static int rtl8169_set_features(struct net_device *dev, else tp->cp_cmd &= ~RxVlan; - tp->cp_cmd |= RTL_R16(tp, CPlusCmd) & ~(RxVlan | RxChkSum); - RTL_W16(tp, CPlusCmd, tp->cp_cmd); RTL_R16(tp, CPlusCmd); @@ -2345,7 +2343,7 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) if (IS_ERR(ci)) return PTR_ERR(ci); - scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & INTT_MASK]; + scale = &ci->scalev[tp->cp_cmd & INTT_MASK]; /* read IntrMitigate and adjust according to scale */ for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { @@ -4841,7 +4839,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || tp->mac_version == RTL_GIGA_MAC_VER_24) && - (RTL_R16(tp, CPlusCmd) & ASF)) { + (tp->cp_cmd & ASF)) { return; } @@ -5321,15 +5319,6 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); } -static u16 rtl_rw_cpluscmd(struct rtl8169_private *tp) -{ - u16 cmd; - - cmd = RTL_R16(tp, CPlusCmd); - RTL_W16(tp, CPlusCmd, cmd); - return cmd; -} - static void rtl_set_rx_max_size(struct rtl8169_private *tp) { /* Low hurts. Let's disable the filtering. 
*/ @@ -5415,10 +5404,8 @@ static void rtl_set_rx_mode(struct net_device *dev) static void rtl_hw_start_8169(struct rtl8169_private *tp) { - if (tp->mac_version == RTL_GIGA_MAC_VER_05) { - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) | PCIMulRW); + if (tp->mac_version == RTL_GIGA_MAC_VER_05) pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); - } RTL_W8(tp, Cfg9346, Cfg9346_Unlock); if (tp->mac_version == RTL_GIGA_MAC_VER_01 || @@ -5439,7 +5426,7 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) tp->mac_version == RTL_GIGA_MAC_VER_04) rtl_set_rx_tx_config_registers(tp); - tp->cp_cmd |= rtl_rw_cpluscmd(tp) | PCIMulRW; + tp->cp_cmd |= PCIMulRW; if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { @@ -5671,7 +5658,8 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); if (tp->dev->mtu <= ETH_DATA_LEN) { rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B | @@ -5699,7 +5687,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) rtl_disable_clock_request(tp); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); } static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) @@ -5728,7 +5717,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); } static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) @@ -5745,7 +5735,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); } static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) @@ -5802,7 +5793,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); } static void rtl_hw_start_8168dp(struct rtl8169_private *tp) @@ -6271,8 +6263,8 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) rtl_set_rx_max_size(tp); - tp->cp_cmd |= RTL_R16(tp, CPlusCmd) | PktCntrDisable | INTT_1; - + tp->cp_cmd &= ~INTT_MASK; + tp->cp_cmd |= PktCntrDisable | INTT_1; RTL_W16(tp, CPlusCmd, tp->cp_cmd); RTL_W16(tp, IntrMitigate, 0x5151); @@ -8130,7 +8122,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Identify chip attached to board */ rtl8169_get_mac_version(tp, cfg->default_ver); - tp->cp_cmd = 0; + tp->cp_cmd = RTL_R16(tp, CPlusCmd); if ((sizeof(dma_addr_t) > 4) && (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && -- cgit v1.2.3 From 12d42c505eb18c0d1f70ec36cea022bb195e7ba7 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 28 Apr 2018 22:19:30 +0200 Subject: r8169: improve handling of CPCMD quirk mask Both quirk masks are the same, so we can merge them. 
The quirk mask includes most bits so it's actually easier to define a mask with the bits to keep. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 35 +++++++---------------------------- 1 file changed, 7 insertions(+), 28 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 868dee7d1131..cf7a7db53791 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -690,6 +690,7 @@ enum rtl_rx_desc_bit { }; #define RsvdMask 0x3fffc000 +#define CPCMD_QUIRK_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) struct TxDesc { __le32 opts1; @@ -5643,22 +5644,11 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) RTL_W8(tp, Config3, data); } -#define R8168_CPCMD_QUIRK_MASK (\ - EnableBist | \ - Mac_dbgo_oe | \ - Force_half_dup | \ - Force_rxflow_en | \ - Force_txflow_en | \ - Cxpl_dbg_sel | \ - ASF | \ - PktCntrDisable | \ - Mac_dbgo_sel) - static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); if (tp->dev->mtu <= ETH_DATA_LEN) { @@ -5687,7 +5677,7 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) rtl_disable_clock_request(tp); - tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); } @@ -5717,7 +5707,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); } @@ -5735,7 +5725,7 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); } @@ -5793,7 +5783,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - tp->cp_cmd &= ~R8168_CPCMD_QUIRK_MASK; + tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); } @@ -6394,17 +6384,6 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } -#define R810X_CPCMD_QUIRK_MASK (\ - EnableBist | \ - Mac_dbgo_oe | \ - Force_half_dup | \ - Force_rxflow_en | \ - Force_txflow_en | \ - Cxpl_dbg_sel | \ - ASF | \ - PktCntrDisable | \ - Mac_dbgo_sel) - static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) { static const struct ephy_info e_info_8102e_1[] = { @@ -6544,7 +6523,7 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp) rtl_set_rx_max_size(tp); - tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; + tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); rtl_set_rx_tx_desc_registers(tp); -- cgit v1.2.3 From 3559d81e76bfe3803e89f2e04cf6ef7ab4f3aace Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 28 Apr 2018 22:19:40 +0200 Subject: r8169: simplify rtl_hw_start_8169 Currently done: - if mac_version in (01, 02, 03, 04) RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - if mac_version in (01, 02, 03, 04) rtl_set_rx_tx_config_registers(tp); - if mac_version not in (01, 02, 03, 04) RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); 
rtl_set_rx_tx_config_registers(tp); So we do exactly the same independent of chip version and can simplify the code. In addition remove the call to rtl_init_rxcfg(), it's called in rtl_init_one() already and the set bits are never touched later. rtl_init_8168/8101 don't include this call either. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index cf7a7db53791..8c816f6c6fc5 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5409,24 +5409,11 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - if (tp->mac_version == RTL_GIGA_MAC_VER_01 || - tp->mac_version == RTL_GIGA_MAC_VER_02 || - tp->mac_version == RTL_GIGA_MAC_VER_03 || - tp->mac_version == RTL_GIGA_MAC_VER_04) - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - - rtl_init_rxcfg(tp); RTL_W8(tp, EarlyTxThres, NoEarlyTx); rtl_set_rx_max_size(tp); - if (tp->mac_version == RTL_GIGA_MAC_VER_01 || - tp->mac_version == RTL_GIGA_MAC_VER_02 || - tp->mac_version == RTL_GIGA_MAC_VER_03 || - tp->mac_version == RTL_GIGA_MAC_VER_04) - rtl_set_rx_tx_config_registers(tp); - tp->cp_cmd |= PCIMulRW; if (tp->mac_version == RTL_GIGA_MAC_VER_02 || @@ -5447,14 +5434,9 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) RTL_W16(tp, IntrMitigate, 0x0000); rtl_set_rx_tx_desc_registers(tp); + rtl_set_rx_tx_config_registers(tp); - if (tp->mac_version != RTL_GIGA_MAC_VER_01 && - tp->mac_version != RTL_GIGA_MAC_VER_02 && - tp->mac_version != RTL_GIGA_MAC_VER_03 && - tp->mac_version != RTL_GIGA_MAC_VER_04) { - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_set_rx_tx_config_registers(tp); - } + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); RTL_W8(tp, Cfg9346, Cfg9346_Lock); -- cgit v1.2.3 From 82d3ff6dd1994d54fe11714ffd4c696cfcacbea1 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 28 Apr 2018 22:19:43 +0200 Subject: r8169: remove calls to rtl_set_rx_mode __dev_open() calls the ndo_set_rx_mode callback anyway, so we don't have to do it here too. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 8c816f6c6fc5..c7b9301aede1 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5445,8 +5445,6 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) RTL_W32(tp, RxMissed, 0); - rtl_set_rx_mode(tp->dev); - /* no early-rx interrupts */ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } @@ -6361,8 +6359,6 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_set_rx_mode(tp->dev); - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } @@ -6554,8 +6550,6 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp) RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_set_rx_mode(tp->dev); - RTL_R8(tp, IntrMask); RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); -- cgit v1.2.3 From 4fd48c4ac0a0578862819295222a825c97686ac7 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 28 Apr 2018 22:19:47 +0200 Subject: r8169: move common initializations to tp->hw_start The chip-specific init code includes quite some calls which are identical for all chips. So move these calls to tp->hw_start(). In addition move rtl_set_rx_max_size() a little to make sure it's defined before it's used. Unfortunately the diff generated by git is a little bit hard to read. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 74 +++++++++--------------------------- 1 file changed, 19 insertions(+), 55 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index c7b9301aede1..66f10d119b12 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5301,10 +5301,10 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) (InterFrameGap << TxInterFrameGapShift)); } -static void rtl_hw_start(struct rtl8169_private *tp) +static void rtl_set_rx_max_size(struct rtl8169_private *tp) { - tp->hw_start(tp); - rtl_irq_enable_all(tp); + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); } static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) @@ -5320,10 +5320,23 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); } -static void rtl_set_rx_max_size(struct rtl8169_private *tp) +static void rtl_hw_start(struct rtl8169_private *tp) { - /* Low hurts. Let's disable the filtering. */ - RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + + tp->hw_start(tp); + + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_set_rx_tx_config_registers(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + + /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ + RTL_R8(tp, IntrMask); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + /* no early-rx interrupts */ + RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); + rtl_irq_enable_all(tp); } static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) @@ -5408,12 +5421,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_05) pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - RTL_W8(tp, EarlyTxThres, NoEarlyTx); - rtl_set_rx_max_size(tp); - tp->cp_cmd |= PCIMulRW; if (tp->mac_version == RTL_GIGA_MAC_VER_02 || @@ -5433,20 +5442,7 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) */ RTL_W16(tp, IntrMitigate, 0x0000); - rtl_set_rx_tx_desc_registers(tp); - rtl_set_rx_tx_config_registers(tp); - - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - - /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ - RTL_R8(tp, IntrMask); - RTL_W32(tp, RxMissed, 0); - - /* no early-rx interrupts */ - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) @@ -6227,12 +6223,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) static void rtl_hw_start_8168(struct rtl8169_private *tp) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - rtl_set_rx_max_size(tp); - tp->cp_cmd &= ~INTT_MASK; tp->cp_cmd |= PktCntrDisable | INTT_1; RTL_W16(tp, CPlusCmd, tp->cp_cmd); @@ -6245,12 +6237,6 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) tp->event_slow &= ~RxOverflow; } - rtl_set_rx_tx_desc_registers(tp); - - rtl_set_rx_tx_config_registers(tp); - - RTL_R8(tp, IntrMask); - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_11: rtl_hw_start_8168bb(tp); @@ -6354,12 +6340,6 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) tp->dev->name, tp->mac_version); break; } - - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) @@ -6495,19 +6475,11 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp) pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_NOSNOOP_EN); - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - rtl_set_rx_max_size(tp); - tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); - rtl_set_rx_tx_desc_registers(tp); - - rtl_set_rx_tx_config_registers(tp); - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_07: rtl_hw_start_8102e_1(tp); @@ -6544,15 +6516,7 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp) break; } - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - RTL_W16(tp, IntrMitigate, 0x0000); - - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - - RTL_R8(tp, IntrMask); - - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) -- cgit v1.2.3 From 04a1e08cc734da60445c417e314a0f35a642861b Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 11 Apr 2018 10:08:18 +0800 Subject: i40evf: Replace GFP_ATOMIC with GFP_KERNEL in i40evf_add_vlan i40evf_add_vlan() is never called in atomic context. i40evf_add_vlan() is only called by i40evf_vlan_rx_add_vid(), which is only set as ".ndo_vlan_rx_add_vid" in struct net_device_ops. ".ndo_vlan_rx_add_vid" is not called in atomic context. 
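Since every path into this helper runs in process context, the allocation can safely use GFP_KERNEL. A minimal sketch of the trade-off between the two flags (hypothetical example_* names, not code from this driver): GFP_ATOMIC never sleeps and is more likely to fail under memory pressure, while GFP_KERNEL may block and let the allocator reclaim memory, so it is preferred whenever the caller is allowed to sleep.

#include <linux/slab.h>
#include <linux/list.h>

/* Illustrative only -- hypothetical types standing in for driver state. */
struct example_filter {
	struct list_head list;
	u16 vlan;
};

struct example_adapter {
	struct list_head vlan_filter_list;
};

/* Illustrative only: allocation-flag choice in a filter-add helper. */
static struct example_filter *example_add_vlan(struct example_adapter *ad,
					       u16 vlan)
{
	struct example_filter *f;

	/* Called from process context (e.g. an ndo callback under RTNL),
	 * so sleeping is allowed and GFP_KERNEL is appropriate here.
	 * GFP_ATOMIC would only be needed from IRQ or spinlocked context.
	 */
	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->vlan = vlan;
	list_add_tail(&f->list, &ad->vlan_filter_list);
	return f;
}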
Despite never getting called from atomic context, i40evf_add_vlan() calls kzalloc() with GFP_ATOMIC, which does not sleep for allocation. GFP_ATOMIC is not necessary and can be replaced with GFP_KERNEL, which can sleep and improve the possibility of sucessful allocation. This is found by a static analysis tool named DCNS written by myself. And I also manually check it. Signed-off-by: Jia-Ju Bai Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 97cda4a8f8e0..8f775a82e2fa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -681,7 +681,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) f = i40evf_find_vlan(adapter, vlan); if (!f) { - f = kzalloc(sizeof(*f), GFP_ATOMIC); + f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) goto clearout; -- cgit v1.2.3 From a6a188e48959829fc6d64a1b52762314bcaf6417 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 28 Apr 2018 12:35:22 +0800 Subject: libcxgb,cxgb4: use __skb_put_zero to simplfy code use helper __skb_put_zero to replace the pattern of __skb_put() && memset() Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 3 +-- drivers/net/ethernet/chelsio/cxgb4/srq.c | 3 +-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h | 15 +++++---------- 3 files changed, 7 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index db92f1858060..aae980205ece 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -64,8 +64,7 @@ static int set_tcb_field(struct adapter *adap, struct filter_entry *f, if (!skb) return -ENOMEM; - req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req)); INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid); req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(adap->sge.fw_evtq.abs_id) | diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c index 6228a5708307..82b70a565e24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/srq.c +++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c @@ -84,8 +84,7 @@ int cxgb4_get_srq_entry(struct net_device *dev, if (!skb) return -ENOMEM; req = (struct cpl_srq_table_req *) - __skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + __skb_put_zero(skb, sizeof(*req)); INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ, TID_TID_V(srq_idx) | diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h index 4b5aacc09cab..240ba9d4c399 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h @@ -90,8 +90,7 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan) { struct cpl_tid_release *req; - req = __skb_put(skb, len); - memset(req, 0, len); + req = __skb_put_zero(skb, len); INIT_TP_WR(req, tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); @@ -104,8 +103,7 @@ cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 
tid, u16 chan, { struct cpl_close_con_req *req; - req = __skb_put(skb, len); - memset(req, 0, len); + req = __skb_put_zero(skb, len); INIT_TP_WR(req, tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); @@ -119,8 +117,7 @@ cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan, { struct cpl_abort_req *req; - req = __skb_put(skb, len); - memset(req, 0, len); + req = __skb_put_zero(skb, len); INIT_TP_WR(req, tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid)); @@ -134,8 +131,7 @@ cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan) { struct cpl_abort_rpl *rpl; - rpl = __skb_put(skb, len); - memset(rpl, 0, len); + rpl = __skb_put_zero(skb, len); INIT_TP_WR(rpl, tid); OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); @@ -149,8 +145,7 @@ cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan, { struct cpl_rx_data_ack *req; - req = __skb_put(skb, len); - memset(req, 0, len); + req = __skb_put_zero(skb, len); INIT_TP_WR(req, tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid)); -- cgit v1.2.3 From f5254429e1756ad7fede0249c9b779e37b6c967f Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 20 Apr 2018 01:41:33 -0700 Subject: i40e/i40evf: cleanup incorrect function doxygen comments Recent versions of the Linux kernel now warn about incorrect parameter definitions for function comments. Fix up several function comments to correctly reflect the current function arguments. This cleans up the warnings and helps ensure our documentation is accurate. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_client.c | 6 ++-- drivers/net/ethernet/intel/i40e/i40e_common.c | 37 +++++++++++++--------- drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 11 ++++--- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 8 ++--- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 24 ++++++++------ drivers/net/ethernet/intel/i40e/i40e_hmc.c | 1 - drivers/net/ethernet/intel/i40e/i40e_main.c | 22 +++++++++---- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 1 + drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4 +-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6 ++-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 16 +++++----- drivers/net/ethernet/intel/i40evf/i40e_common.c | 1 + drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 4 ++- drivers/net/ethernet/intel/i40evf/i40evf_client.c | 4 +-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 7 ++-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 5 ++- .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 11 +------ 17 files changed, 95 insertions(+), 73 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 2041757f948c..5f3b8b9ff511 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -40,7 +40,7 @@ static struct i40e_ops i40e_lan_ops = { /** * i40e_client_get_params - Get the params that can change at runtime * @vsi: the VSI with the message - * @param: clinet param struct + * @params: client param struct * **/ static @@ -566,7 +566,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev, * i40e_client_setup_qvlist * @ldev: pointer to L2 context. * @client: Client pointer. 
- * @qv_info: queue and vector list + * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ @@ -641,7 +641,7 @@ err: * i40e_client_request_reset * @ldev: pointer to L2 context. * @client: Client pointer. - * @level: reset level + * @reset_level: reset level **/ static void i40e_client_request_reset(struct i40e_info *ldev, struct i40e_client *client, diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 6f8fd70d606a..eb2d1530d331 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1671,6 +1671,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, /** * i40e_set_fc * @hw: pointer to the hw struct + * @aq_failures: buffer to return AdminQ failure information + * @atomic_restart: whether to enable atomic link restart * * Set the requested flow control mode using set_phy_config. **/ @@ -2807,8 +2809,8 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW - * @rule_used: Number of rules used in internal switch - * @rule_free: Number of rules free in internal switch + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch * * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for * VEBs/VEPA elements only @@ -2868,8 +2870,8 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW - * @rule_used: Number of rules used in internal switch - * @rule_free: Number of rules free in internal switch + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch * * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only **/ @@ -2899,8 +2901,8 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, * add_mirrorrule. * @mr_list: list of mirrored VLAN IDs to be removed * @cmd_details: pointer to command details structure or NULL - * @rule_used: Number of rules used in internal switch - * @rule_free: Number of rules free in internal switch + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch * * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only **/ @@ -3648,6 +3650,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, /** * i40e_aq_start_lldp * @hw: pointer to the hw struct + * @buff: buffer for result + * @buff_size: buffer size * @cmd_details: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. 
@@ -3728,7 +3732,6 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct * @udp_port: the UDP port to add in Host byte order - * @header_len: length of the tunneling header length in DWords * @protocol_index: protocol index type * @filter_index: pointer to filter index * @cmd_details: pointer to command details structure or NULL @@ -3947,6 +3950,7 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, * @hw: pointer to the hw struct * @seid: seid of the switching component connected to Physical Port * @ets_data: Buffer holding ETS parameters + * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, @@ -4290,10 +4294,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, * @hw: pointer to the hw struct * @seid: VSI seid to add ethertype filter from **/ -#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 seid) { +#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; @@ -4424,6 +4428,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) * @ret_buff_size: actual buffer size returned * @ret_next_table: next block to read * @ret_next_index: next index to read + * @cmd_details: pointer to command details structure or NULL * * Dump internal FW/HW data for debug purposes. * @@ -4550,7 +4555,7 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, * i40e_read_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value @@ -4595,7 +4600,7 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, * i40e_write_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes specified PHY register value @@ -4636,7 +4641,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value @@ -4710,7 +4715,7 @@ phy_read_end: * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register @@ -4777,7 +4782,7 @@ phy_write_end: * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register @@ -4813,7 +4818,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads 
specified PHY register value @@ -4848,7 +4853,6 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw, * i40e_get_phy_address * @hw: pointer to the HW structure * @dev_num: PHY port num that address we want - * @phy_addr: Returned PHY address * * Gets PHY address for current port **/ @@ -5058,7 +5062,9 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, * i40e_led_set_phy * @hw: pointer to the HW structure * @on: true or false + * @led_addr: address of led register to use * @mode: original val plus bit for set or ignore + * * Set led's on or off when controlled by the PHY * **/ @@ -5347,6 +5353,7 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes + * @flags: AdminQ command flags * @cmd_details: pointer to command details structure or NULL **/ enum diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index a09d723d5ca0..9deae9a35423 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -23,7 +23,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) /** * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration - * @netdev: the corresponding netdev + * @dev: the corresponding netdev * @ets: structure to hold the ETS information * * Returns local IEEE ETS configuration @@ -62,8 +62,8 @@ static int i40e_dcbnl_ieee_getets(struct net_device *dev, /** * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration - * @netdev: the corresponding netdev - * @ets: structure to hold the PFC information + * @dev: the corresponding netdev + * @pfc: structure to hold the PFC information * * Returns local IEEE PFC configuration **/ @@ -95,7 +95,7 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev, /** * i40e_dcbnl_getdcbx - retrieve current DCBx capability - * @netdev: the corresponding netdev + * @dev: the corresponding netdev * * Returns DCBx capability features **/ @@ -108,7 +108,8 @@ static u8 i40e_dcbnl_getdcbx(struct net_device *dev) /** * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx - * @netdev: the corresponding netdev + * @dev: the corresponding netdev + * @perm_addr: buffer to store the MAC address * * Returns the SAN MAC address used for LLDP exchange **/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 94774a2a511f..56b911a5dd8b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -12,8 +12,8 @@ static struct dentry *i40e_dbg_root; /** * i40e_dbg_find_vsi - searches for the vsi with the given seid - * @pf - the PF structure to search for the vsi - * @seid - seid of the vsi it is searching for + * @pf: the PF structure to search for the vsi + * @seid: seid of the vsi it is searching for **/ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) { @@ -31,8 +31,8 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) /** * i40e_dbg_find_veb - searches for the veb with the given seid - * @pf - the PF structure to search for the veb - * @seid - seid of the veb it is searching for + * @pf: the PF structure to search for the veb + * @seid: seid of the veb it is searching for **/ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c 
b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 94dbe2c419ca..c1bbfb913e49 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1055,6 +1055,9 @@ static int i40e_nway_reset(struct net_device *netdev) /** * i40e_get_pauseparam - Get Flow Control status + * @netdev: netdevice structure + * @pause: buffer to return pause parameters + * * Return tx/rx-pause status **/ static void i40e_get_pauseparam(struct net_device *netdev, @@ -2526,7 +2529,7 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) /** * i40e_check_mask - Check whether a mask field is set * @mask: the full mask value - * @field; mask of the field to check + * @field: mask of the field to check * * If the given mask is fully set, return positive value. If the mask for the * field is fully unset, return zero. Otherwise return a negative error code. @@ -2597,6 +2600,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, /** * i40e_fill_rx_flow_user_data - Fill in user-defined data field * @fsp: pointer to rx_flow specification + * @data: pointer to return userdef data * * Reads the userdef data structure and properly fills in the user defined * fields of the rx_flow_spec. @@ -2775,6 +2779,7 @@ no_input_set: * i40e_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule data * * Returns Success if the command is supported. **/ @@ -2816,7 +2821,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, /** * i40e_get_rss_hash_bits - Read RSS Hash bits from register * @nfc: pointer to user request - * @i_setc bits currently set + * @i_setc: bits currently set * * Returns value of bits to be set per user request **/ @@ -2861,7 +2866,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) /** * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash * @pf: pointer to the physical function struct - * @cmd: ethtool rxnfc command + * @nfc: ethtool rxnfc command * * Returns Success if the flow input set is supported. **/ @@ -3260,7 +3265,7 @@ static int i40e_add_flex_offset(struct list_head *flex_pit_list, * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table * @pf: Pointer to the PF structure * @flex_pit_list: list of flexible src offsets in use - * #flex_pit_start: index to first entry for this section of the table + * @flex_pit_start: index to first entry for this section of the table * * In order to handle flexible data, the hardware uses a table of values * called the FLX_PIT table. This table is used to indicate which sections of @@ -3374,7 +3379,7 @@ static void i40e_reprogram_flex_pit(struct i40e_pf *pf) /** * i40e_flow_str - Converts a flow_type into a human readable string - * @flow_type: the flow type from a flow specification + * @fsp: the flow specification * * Currently only flow types we support are included here, and the string * value attempts to match what ethtool would use to configure this flow type. @@ -4079,7 +4084,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi) /** * i40e_get_channels - Get the current channels enabled and max supported etc. - * @netdev: network interface device structure + * @dev: network interface device structure * @ch: ethtool channels structure * * We don't support separate tx and rx queues as channels. 
The other count @@ -4088,7 +4093,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi) * q_vectors since we support a lot more queue pairs than q_vectors. **/ static void i40e_get_channels(struct net_device *dev, - struct ethtool_channels *ch) + struct ethtool_channels *ch) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -4107,14 +4112,14 @@ static void i40e_get_channels(struct net_device *dev, /** * i40e_set_channels - Set the new channels count. - * @netdev: network interface device structure + * @dev: network interface device structure * @ch: ethtool channels structure * * The new channels count may not be the same as requested by the user * since it gets rounded down to a power of 2 value. **/ static int i40e_set_channels(struct net_device *dev, - struct ethtool_channels *ch) + struct ethtool_channels *ch) { const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; struct i40e_netdev_priv *np = netdev_priv(dev); @@ -4249,6 +4254,7 @@ out: * @netdev: network interface device structure * @indir: indirection table * @key: hash key + * @hfunc: hash function to use * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index e40f3b59e42b..19ce93d7fd0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -174,7 +174,6 @@ exit: * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index - * @is_pf: distinguishes a VF from a PF * * This function: * 1. Marks the entry in pd tabe (for paged address mode) or in sd table diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 41bad112a907..ad01bfc5ec80 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -254,8 +254,8 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) /** * i40e_find_vsi_from_id - searches for the vsi with the given id - * @pf - the pf structure to search for the vsi - * @id - id of the vsi it is searching for + * @pf: the pf structure to search for the vsi + * @id: id of the vsi it is searching for **/ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) { @@ -411,6 +411,7 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring, /** * i40e_get_netdev_stats_struct - Get statistics for netdev interface * @netdev: network interface device structure + * @stats: data structure to store statistics * * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. @@ -2003,7 +2004,7 @@ struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next) * from firmware * @count: Number of filters added * @add_list: return data from fw - * @head: pointer to first filter in current batch + * @add_head: pointer to first filter in current batch * * MAC filter entries from list were slated to be added to device. Returns * number of successful filters. Note that 0 does NOT mean success! 
@@ -2110,6 +2111,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, /** * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags * @vsi: pointer to the VSI + * @vsi_name: the VSI name * @f: filter data * * This function sets or clears the promiscuous broadcast flags for VLAN @@ -2816,6 +2818,7 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) /** * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload * @netdev: network interface to be adjusted + * @proto: unused protocol value * @vid: vlan id to be added * * net_device_ops implementation for adding vlan ids @@ -2840,6 +2843,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, /** * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload * @netdev: network interface to be adjusted + * @proto: unused protocol value * @vid: vlan id to be removed * * net_device_ops implementation for removing vlan ids @@ -3461,7 +3465,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) /** * i40e_enable_misc_int_causes - enable the non-queue interrupts - * @hw: ptr to the hardware info + * @pf: pointer to private device data structure **/ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) { @@ -5072,7 +5076,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC * @vsi: the VSI being configured * @enabled_tc: TC bitmap - * @bw_credits: BW shared credits per TC + * @bw_share: BW shared credits per TC * * Returns 0 on success, negative value on failure **/ @@ -6329,6 +6333,7 @@ out: /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message + * @isup: true of link is up, false otherwise */ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) { @@ -9980,7 +9985,7 @@ unlock_pf: /** * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI - * @type: VSI pointer + * @vsi: VSI pointer * @free_qvectors: a bool to specify if q_vectors need to be freed. * * On error: returns error code (negative) @@ -10776,7 +10781,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) * @vsi: Pointer to VSI structure * @seed: Buffer to store the keys * @lut: Buffer to store the lookup table entries - * lut_size: Size of buffer to store the lookup table entries + * @lut_size: Size of buffer to store the lookup table entries * * Returns 0 on success, negative on failure */ @@ -11476,6 +11481,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev, * @tb: pointer to array of nladdr (unused) * @dev: the net device pointer * @addr: the MAC address entry being added + * @vid: VLAN ID * @flags: instructions from stack about fdb operation */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], @@ -11521,6 +11527,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * i40e_ndo_bridge_setlink - Set the hardware bridge mode * @dev: the netdev being configured * @nlh: RTNL message + * @flags: bridge flags * * Inserts a new hardware bridge if not already created and * enables the bridging mode requested (VEB or VEPA). If the @@ -14094,6 +14101,7 @@ static void i40e_remove(struct pci_dev *pdev) /** * i40e_pci_error_detected - warning that something funky happened in PCI land * @pdev: PCI device information struct + * @error: the type of PCI error * * Called to warn that something happened and the error handling steps * are in progress. 
Allows the driver to quiesce things, be ready for diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 51554df9cb9a..0299e5bbb902 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1149,6 +1149,7 @@ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) * i40e_nvmupd_check_wait_event - handle NVM update operation events * @hw: pointer to the hardware structure * @opcode: the event that just happened + * @desc: AdminQ descriptor **/ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, struct i40e_aq_desc *desc) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 47d10ad39c18..43d7c44d6d9f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -483,7 +483,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) /** * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping * @pf: Board private structure - * @ifreq: ioctl data + * @ifr: ioctl data * * Obtain the current hardware timestamping settigs as requested. To do this, * keep a shadow copy of the timestamp settings rather than attempting to @@ -627,7 +627,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, /** * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping * @pf: Board private structure - * @ifreq: ioctl data + * @ifr: ioctl data * * Respond to the user filter requests and make the appropriate hardware * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 6959897c789e..5efa68de935b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -471,7 +471,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, /** * i40e_add_del_fdir - Build raw packets to add/del fdir filter * @vsi: pointer to the targeted VSI - * @cmd: command to get or set RX flow classification rules + * @input: filter to add or delete * @add: true adds a filter, false removes it * **/ @@ -689,7 +689,7 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) /** * i40e_get_tx_pending - how many tx descriptors not processed - * @tx_ring: the ring of descriptors + * @ring: the ring of descriptors * @in_sw: use SW variables * * Since there is no access to the ring head register @@ -1771,6 +1771,8 @@ static inline int i40e_ptype_to_htype(u8 ptype) * i40e_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor + * @skb: skb currently being received and modified + * @rx_ptype: Rx packet type **/ static inline void i40e_rx_hash(struct i40e_ring *ring, union i40e_rx_desc *rx_desc, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 3e7ab9fbeb0c..2ceea63cc6cf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -8,8 +8,8 @@ /** * i40e_vc_vf_broadcast * @pf: pointer to the PF structure - * @opcode: operation code - * @retval: return value + * @v_opcode: operation code + * @v_retval: return value * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1639,6 +1639,7 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, /** * i40e_vc_get_version_msg * @vf: pointer to the VF info + * @msg: pointer to the msg buffer * * called from the 
VF to request the API version used by the PF **/ @@ -1682,7 +1683,6 @@ static void i40e_del_qch(struct i40e_vf *vf) * i40e_vc_get_vf_resources_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to request its resources **/ @@ -1806,8 +1806,6 @@ err: /** * i40e_vc_reset_vf_msg * @vf: pointer to the VF info - * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to reset itself, * unlike other virtchnl messages, PF driver @@ -3532,15 +3530,16 @@ err: * i40e_vc_process_vf_msg * @pf: pointer to the PF structure * @vf_id: source VF id + * @v_opcode: operation code + * @v_retval: unused return value code * @msg: pointer to the msg buffer * @msglen: msg length - * @msghndl: msg handle * * called from the common aeq/arq handler to * process request from VF **/ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) + u32 __always_unused v_retval, u8 *msg, u16 msglen) { struct i40e_hw *hw = &pf->hw; int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; @@ -3991,7 +3990,8 @@ error_pvid: * i40e_ndo_set_vf_bw * @netdev: network interface device structure * @vf_id: VF identifier - * @tx_rate: Tx rate + * @min_tx_rate: Minimum Tx rate + * @max_tx_rate: Maximum Tx rate * * configure VF Tx rate **/ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 0841b2d1ca6d..9cef54971312 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -1231,6 +1231,7 @@ i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes + * @flags: AdminQ command flags * @cmd_details: pointer to command details structure or NULL **/ enum diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 87101f565be1..a9730711e257 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -105,7 +105,7 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) /** * i40evf_get_tx_pending - how many Tx descriptors not processed - * @tx_ring: the ring of descriptors + * @ring: the ring of descriptors * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register @@ -1046,6 +1046,8 @@ static inline int i40e_ptype_to_htype(u8 ptype) * i40e_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor + * @skb: skb currently being received and modified + * @rx_ptype: Rx packet type **/ static inline void i40e_rx_hash(struct i40e_ring *ring, union i40e_rx_desc *rx_desc, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c index a30d093a52d6..3cc9d60d0d72 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c @@ -178,7 +178,6 @@ void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset) /** * i40evf_client_add_instance - add a client instance to the instance list * @adapter: pointer to the board struct - * @client: pointer to a client struct in the client list. 
* * Returns cinst ptr on success, NULL on failure **/ @@ -236,7 +235,6 @@ out: /** * i40evf_client_del_instance - removes a client instance from the list * @adapter: pointer to the board struct - * @client: pointer to the client struct * **/ static @@ -440,7 +438,7 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map * @ldev: pointer to L2 context. * @client: Client pointer. - * @qv_info: queue and vector list + * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 404595efea78..69efe0aec76a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -202,7 +202,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) /** * i40evf_get_priv_flags - report device private flags - * @dev: network interface device structure + * @netdev: network interface device structure * * The get string set count and the string set should be matched for each * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags @@ -229,7 +229,7 @@ static u32 i40evf_get_priv_flags(struct net_device *netdev) /** * i40evf_set_priv_flags - set private flags - * @dev: network interface device structure + * @netdev: network interface device structure * @flags: bit flags to be set **/ static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags) @@ -603,6 +603,7 @@ static int i40evf_set_per_queue_coalesce(struct net_device *netdev, * i40evf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule locations * * Returns Success if the command is supported. **/ @@ -722,6 +723,7 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) * @netdev: network interface device structure * @indir: indirection table * @key: hash key + * @hfunc: hash function in use * * Reads the indirection table directly from the hardware. Always returns 0. **/ @@ -750,6 +752,7 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, * @netdev: network interface device structure * @indir: indirection table * @key: hash key + * @hfunc: hash function to use * * Returns -EINVAL if the table specifies an inavlid queue id, otherwise * returns 0 after programming the table. diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 8f775a82e2fa..28a8cc4a14cb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -449,6 +449,7 @@ static void i40evf_irq_affinity_release(struct kref *ref) {} /** * i40evf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure + * @basename: device basename * * Allocates MSI-X vectors for tx and rx handling, and requests * interrupts from the kernel. 
@@ -721,6 +722,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) /** * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct + * @proto: unused protocol data * @vid: VLAN tag **/ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, @@ -738,6 +740,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, /** * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device * @netdev: network device struct + * @proto: unused protocol data * @vid: VLAN tag **/ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, @@ -3136,7 +3139,7 @@ static int i40evf_set_features(struct net_device *netdev, /** * i40evf_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff - * @netdev: This physical port's netdev + * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ static netdev_features_t i40evf_features_check(struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 8bcefb7f7ac0..565677de5ba3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -155,8 +155,7 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) /** * i40evf_get_vf_config - * @hw: pointer to the hardware structure - * @len: length of buffer + * @adapter: private adapter structure * * Get VF configuration from PF and populate hw structure. Must be called after * admin queue is initialized. Busy waits until response is received from PF, @@ -399,8 +398,6 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num) /** * i40evf_add_ether_addrs * @adapter: adapter structure - * @addrs: the MAC address filters to add (contiguous) - * @count: number of filters * * Request that the PF add one or more addresses to our filters. **/ @@ -473,8 +470,6 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) /** * i40evf_del_ether_addrs * @adapter: adapter structure - * @addrs: the MAC address filters to remove (contiguous) - * @count: number of filtes * * Request that the PF remove one or more addresses from our filters. **/ @@ -547,8 +542,6 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) /** * i40evf_add_vlans * @adapter: adapter structure - * @vlans: the VLANs to add - * @count: number of VLANs * * Request that the PF add one or more VLAN filters to our VSI. **/ @@ -619,8 +612,6 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) /** * i40evf_del_vlans * @adapter: adapter structure - * @vlans: the VLANs to remove - * @count: number of VLANs * * Request that the PF remove one or more VLAN filters from our VSI. **/ -- cgit v1.2.3 From 6334f2432fdf5d5252850ff2eebbaa00232f6ab9 Mon Sep 17 00:00:00 2001 From: Mariusz Stachura Date: Fri, 20 Apr 2018 01:41:34 -0700 Subject: i40e: fix reading LLDP configuration Previous method for reading LLDP config was based on hard-coded offsets. It happened to work, because of structured architecture of the NVM memory. In the new approach, known as FLAT, we need to calculate the absolute address, instead of using relative values. Needed defines for memory location were added. 
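The address calculation hinges on how a Shadow RAM pointer word is encoded: bit 15 selects whether the remaining 15 bits count 2-byte words or 4 KB sectors. A condensed sketch of that resolution (a hypothetical stand-alone helper that mirrors the arithmetic in the hunk below, not the driver's actual function):

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: resolve a Shadow RAM pointer word to a byte address.
 * Bit 15 set   -> value is a count of 4 KB sectors
 * Bit 15 clear -> value is a count of 2-byte words
 */
static u32 example_resolve_sr_pointer(u16 ptr_word)
{
	if (ptr_word & BIT(15))
		return (ptr_word & 0x7FFF) * 4096;

	return (ptr_word & 0x7FFF) * 2;
}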
Signed-off-by: Mariusz Stachura Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 91 ++++++++++++++++++++++++--- drivers/net/ethernet/intel/i40e/i40e_type.h | 8 ++- drivers/net/ethernet/intel/i40evf/i40e_type.h | 10 ++- 3 files changed, 99 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 69e7d4967b1c..56bff8faf371 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -920,6 +920,70 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) return ret; } +/** + * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * @module: address of the module pointer + * @word_offset: offset of LLDP configuration + * + * Reads the LLDP configuration data from NVM using passed addresses + **/ +static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg, + u8 module, u32 word_offset) +{ + u32 address, offset = (2 * word_offset); + i40e_status ret; + __le16 raw_mem; + u16 mem; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + return ret; + + ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem, + true, NULL); + i40e_release_nvm(hw); + if (ret) + return ret; + + mem = le16_to_cpu(raw_mem); + /* Check if this pointer needs to be read in word size or 4K sector + * units. + */ + if (mem & I40E_PTR_TYPE) + address = (0x7FFF & mem) * 4096; + else + address = (0x7FFF & mem) * 2; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem, + true, NULL); + i40e_release_nvm(hw); + if (ret) + return ret; + + mem = le16_to_cpu(raw_mem); + offset = mem + word_offset; + offset *= 2; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, 0, address + offset, + sizeof(struct i40e_lldp_variables), lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} + /** * i40e_read_lldp_cfg - read LLDP Configuration data from NVM * @hw: pointer to the HW structure @@ -931,21 +995,34 @@ i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg) { i40e_status ret = 0; - u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); + u32 mem; if (!lldp_cfg) return I40E_ERR_PARAM; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret) - goto err_lldp_cfg; + return ret; - ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, - sizeof(struct i40e_lldp_variables), - (u8 *)lldp_cfg, - true, NULL); + ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem), + &mem, true, NULL); i40e_release_nvm(hw); + if (ret) + return ret; + + /* Read a bit that holds information whether we are running flat or + * structured NVM image. Flat image has LLDP configuration in shadow + * ram, so there is a need to pass different addresses for both cases. 
+ */ + if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) { + /* Flat NVM case */ + ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR, + I40E_SR_LLDP_CFG_PTR); + } else { + /* Good old structured NVM image */ + ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR, + I40E_NVM_LLDP_CFG_PTR); + } -err_lldp_cfg: return ret; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 40968a4216a7..7df969c59855 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1294,7 +1294,8 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 -#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_EMP_MODULE_PTR 0x0F +#define I40E_SR_EMP_MODULE_PTR 0x48 #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 #define I40E_SR_BOOT_CONFIG_PTR 0x17 @@ -1313,6 +1314,8 @@ struct i40e_hw_port_stats { #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) +#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) +#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) #define I40E_PTR_TYPE BIT(15) #define I40E_SR_OCP_CFG_WORD0 0x2B #define I40E_SR_OCP_ENABLED BIT(15) @@ -1430,7 +1433,8 @@ enum i40e_reset_type { }; /* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0xD +#define I40E_NVM_LLDP_CFG_PTR 0x06 +#define I40E_SR_LLDP_CFG_PTR 0x31 struct i40e_lldp_variables { u16 length; u16 adminstatus; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index c77cb9f672d8..094387db3c11 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1233,7 +1233,8 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 -#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_EMP_MODULE_PTR 0x0F +#define I40E_SR_EMP_MODULE_PTR 0x48 #define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 @@ -1249,6 +1250,9 @@ struct i40e_hw_port_stats { #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) +#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) +#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) +#define I40E_PTR_TYPE BIT(15) /* Shadow RAM related */ #define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 @@ -1362,6 +1366,10 @@ enum i40e_reset_type { I40E_RESET_EMPR = 3, }; +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0x06 +#define I40E_SR_LLDP_CFG_PTR 0x31 + /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 -- cgit v1.2.3 From 6ee4d32255865fc4b383355a8354603d60ab9f8a Mon Sep 17 00:00:00 2001 From: Jakub Pawlak Date: Fri, 20 Apr 2018 01:41:35 -0700 Subject: i40e: Add advertising 10G LR mode The advertising 10G LR mode should be possible to set but in the function i40e_set_link_ksettings() check for this is missed. This patch adds check for 10000baseLR_Full flag for 10G modes. 
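In condensed form the check the patch extends collapses all advertised 10G PHY variants onto the single 10G device speed; the sketch below is illustrative only (a made-up helper, and the exact set of 10G modes the driver tests is shown in the hunk that follows):

#include <linux/ethtool.h>

/* Illustrative only: any advertised 10G variant selects the 10G speed. */
static bool example_advertises_10g(const struct ethtool_link_ksettings *ks)
{
	return ethtool_link_ksettings_test_link_mode(ks, advertising,
						     10000baseT_Full) ||
	       ethtool_link_ksettings_test_link_mode(ks, advertising,
						     10000baseKR_Full) ||
	       ethtool_link_ksettings_test_link_mode(ks, advertising,
						     10000baseCR_Full) ||
	       ethtool_link_ksettings_test_link_mode(ks, advertising,
						     10000baseSR_Full) ||
	       /* the previously missing variant */
	       ethtool_link_ksettings_test_link_mode(ks, advertising,
						     10000baseLR_Full);
}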
Signed-off-by: Jakub Pawlak Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c1bbfb913e49..fc6a5eef141c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -953,7 +953,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseCR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseSR_Full)) + 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseLR_Full)) config.link_speed |= I40E_LINK_SPEED_10GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 20000baseKR2_Full)) -- cgit v1.2.3 From e4062894d5939bbba9fbed5a70a9eaf6fa397b10 Mon Sep 17 00:00:00 2001 From: Paweł Jabłoński Date: Fri, 20 Apr 2018 01:41:36 -0700 Subject: i40evf: Fix turning TSO, GSO and GRO on after MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes the problem where each MTU change turns TSO, GSO and GRO on from off state. Now when TSO, GSO or GRO is turned off, MTU change does not turn them on. Signed-off-by: Paweł Jabłoński Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 28a8cc4a14cb..3f04a182903d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -3357,6 +3357,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + /* Do not turn on offloads when they are requested to be turned off. + * TSO needs minimum 576 bytes to work correctly. + */ + if (netdev->wanted_features) { + if (!(netdev->wanted_features & NETIF_F_TSO) || + netdev->mtu < 576) + netdev->features &= ~NETIF_F_TSO; + if (!(netdev->wanted_features & NETIF_F_TSO6) || + netdev->mtu < 576) + netdev->features &= ~NETIF_F_TSO6; + if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) + netdev->features &= ~NETIF_F_TSO_ECN; + if (!(netdev->wanted_features & NETIF_F_GRO)) + netdev->features &= ~NETIF_F_GRO; + if (!(netdev->wanted_features & NETIF_F_GSO)) + netdev->features &= ~NETIF_F_GSO; + } + adapter->vsi.id = adapter->vsi_res->vsi_id; adapter->vsi.back = adapter; -- cgit v1.2.3 From 5305d0fe2f22ec42b19f5ed205faad9b26955e5c Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 20 Apr 2018 01:41:37 -0700 Subject: i40e: Fix multiple issues with UDP tunnel offload filter configuration This fixes at least 2 issues I have found with the UDP tunnel filter configuration. The first issue is the fact that the tunnels didn't have any sort of mutual exclusion in place to prevent an update from racing with a user request to add/remove a port. As such you could request to add and remove a port before the port update code had a chance to respond which would result in a very confusing result. To address it I have added 2 changes. First I added the RTNL mutex wrapper around our updating of the pending, port, and filter_index bits. 
Second I added logic so that we cannot use a port that has a pending deletion since we need to free the space in hardware before we can allow software to reuse it. The second issue addressed is the fact that we were not recording the actual filter index provided to us by the admin queue. As a result we were deleting filters that were not associated with the actual filter we wanted to delete. To fix that I added a filter_index member to the UDP port tracking structure. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 2 + drivers/net/ethernet/intel/i40e/i40e_main.c | 66 +++++++++++++++++++++++------ 2 files changed, 56 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index f573108faec3..70d369e9139c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -310,10 +310,12 @@ struct i40e_tc_configuration { struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS]; }; +#define I40E_UDP_PORT_INDEX_UNUSED 255 struct i40e_udp_port_config { /* AdminQ command interface expects port number in Host byte order */ u16 port; u8 type; + u8 filter_index; }; /* macros related to FLX_PIT */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ad01bfc5ec80..0babde10fa15 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9672,9 +9672,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } -static const char *i40e_tunnel_name(struct i40e_udp_port_config *port) +static const char *i40e_tunnel_name(u8 type) { - switch (port->type) { + switch (type) { case UDP_TUNNEL_TYPE_VXLAN: return "vxlan"; case UDP_TUNNEL_TYPE_GENEVE: @@ -9708,37 +9708,68 @@ static void i40e_sync_udp_filters(struct i40e_pf *pf) static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - i40e_status ret; + u8 filter_index, type; u16 port; int i; if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state)) return; + /* acquire RTNL to maintain state of flags and port requests */ + rtnl_lock(); + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->pending_udp_bitmap & BIT_ULL(i)) { + struct i40e_udp_port_config *udp_port; + i40e_status ret = 0; + + udp_port = &pf->udp_ports[i]; pf->pending_udp_bitmap &= ~BIT_ULL(i); - port = pf->udp_ports[i].port; + + port = READ_ONCE(udp_port->port); + type = READ_ONCE(udp_port->type); + filter_index = READ_ONCE(udp_port->filter_index); + + /* release RTNL while we wait on AQ command */ + rtnl_unlock(); + if (port) ret = i40e_aq_add_udp_tunnel(hw, port, - pf->udp_ports[i].type, - NULL, NULL); - else - ret = i40e_aq_del_udp_tunnel(hw, i, NULL); + type, + &filter_index, + NULL); + else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED) + ret = i40e_aq_del_udp_tunnel(hw, filter_index, + NULL); + + /* reacquire RTNL so we can update filter_index */ + rtnl_lock(); if (ret) { dev_info(&pf->pdev->dev, "%s %s port %d, index %d failed, err %s aq_err %s\n", - i40e_tunnel_name(&pf->udp_ports[i]), + i40e_tunnel_name(type), port ? 
"add" : "delete", - port, i, + port, + filter_index, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - pf->udp_ports[i].port = 0; + if (port) { + /* failed to add, just reset port, + * drop pending bit for any deletion + */ + udp_port->port = 0; + pf->pending_udp_bitmap &= ~BIT_ULL(i); + } + } else if (port) { + /* record filter index on success */ + udp_port->filter_index = filter_index; } } } + + rtnl_unlock(); } /** @@ -11355,6 +11386,11 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port) u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + /* Do not report ports with pending deletions as + * being available. + */ + if (!port && (pf->pending_udp_bitmap & BIT_ULL(i))) + continue; if (pf->udp_ports[i].port == port) return i; } @@ -11409,6 +11445,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, /* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].port = port; + pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED; pf->pending_udp_bitmap |= BIT_ULL(next_idx); set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); } @@ -11450,7 +11487,12 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, * and make it pending */ pf->udp_ports[idx].port = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); + + /* Toggle pending bit instead of setting it. This way if we are + * deleting a port that has yet to be added we just clear the pending + * bit and don't have to worry about it. + */ + pf->pending_udp_bitmap ^= BIT_ULL(idx); set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); return; -- cgit v1.2.3 From 830e0dd9996c4644e42412aa6c46ed8f8eab0cca Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 20 Apr 2018 01:41:38 -0700 Subject: i40e: avoid overflow in i40e_ptp_adjfreq() When operating at 1GbE, the base incval for the PTP clock is so large that multiplying it by numbers close to the max_adj can overflow the u64. Rather than attempting to limit the max_adj to a value small enough to avoid overflow, instead calculate the incvalue adjustment based on the 40GbE incvalue, and then multiply that by the scaling factor for the link speed. This sacrifices a small amount of precision in the adjustment but we avoid erratic behavior of the clock due to the overflow caused if ppb is very near the maximum adjustment. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 41 ++++++++++++++++++++---------- 2 files changed, 28 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 70d369e9139c..1d59ab6ca90f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -586,7 +586,7 @@ struct i40e_pf { unsigned long ptp_tx_start; struct hwtstamp_config tstamp_config; struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */ - u64 ptp_base_adj; + u32 ptp_adj_mult; u32 tx_hwtstamp_timeouts; u32 tx_hwtstamp_skipped; u32 rx_hwtstamp_cleared; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 43d7c44d6d9f..aa3daec2049d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -16,9 +16,9 @@ * At 1Gb link, the period is multiplied by 20. (32ns) * 1588 functionality is not supported at 100Mbps. 
*/ -#define I40E_PTP_40GB_INCVAL 0x0199999999ULL -#define I40E_PTP_10GB_INCVAL 0x0333333333ULL -#define I40E_PTP_1GB_INCVAL 0x2000000000ULL +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL_MULT 2 +#define I40E_PTP_1GB_INCVAL_MULT 20 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ @@ -106,17 +106,24 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) ppb = -ppb; } - smp_mb(); /* Force any pending update before accessing. */ - adj = READ_ONCE(pf->ptp_base_adj); - - freq = adj; + freq = I40E_PTP_40GB_INCVAL; freq *= ppb; diff = div_u64(freq, 1000000000ULL); if (neg_adj) - adj -= diff; + adj = I40E_PTP_40GB_INCVAL - diff; else - adj += diff; + adj = I40E_PTP_40GB_INCVAL + diff; + + /* At some link speeds, the base incval is so large that directly + * multiplying by ppb would result in arithmetic overflow even when + * using a u64. Avoid this by instead calculating the new incval + * always in terms of the 40GbE clock rate and then multiplying by the + * link speed factor afterwards. This does result in slightly lower + * precision at lower link speeds, but it is fairly minor. + */ + smp_mb(); /* Force any pending update before accessing. */ + adj *= READ_ONCE(pf->ptp_adj_mult); wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); @@ -438,6 +445,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) struct i40e_link_status *hw_link_info; struct i40e_hw *hw = &pf->hw; u64 incval; + u32 mult; hw_link_info = &hw->phy.link_info; @@ -445,10 +453,10 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) switch (hw_link_info->link_speed) { case I40E_LINK_SPEED_10GB: - incval = I40E_PTP_10GB_INCVAL; + mult = I40E_PTP_10GB_INCVAL_MULT; break; case I40E_LINK_SPEED_1GB: - incval = I40E_PTP_1GB_INCVAL; + mult = I40E_PTP_1GB_INCVAL_MULT; break; case I40E_LINK_SPEED_100MB: { @@ -459,15 +467,20 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n"); warn_once++; } - incval = 0; + mult = 0; break; } case I40E_LINK_SPEED_40GB: default: - incval = I40E_PTP_40GB_INCVAL; + mult = 1; break; } + /* The increment value is calculated by taking the base 40GbE incvalue + * and multiplying it by a factor based on the link speed. + */ + incval = I40E_PTP_40GB_INCVAL * mult; + /* Write the new increment value into the increment register. The * hardware will not update the clock until both registers have been * written. @@ -476,7 +489,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); /* Update the base adjustement value. */ - WRITE_ONCE(pf->ptp_base_adj, incval); + WRITE_ONCE(pf->ptp_adj_mult, mult); smp_mb(); /* Force the above update. */ } -- cgit v1.2.3 From d0fda04d7e31e52f19ad7a21fb8d6700db55a6e9 Mon Sep 17 00:00:00 2001 From: Harshitha Ramamurthy Date: Fri, 20 Apr 2018 01:41:39 -0700 Subject: i40e/i40evf: take into account queue map from vf when handling queues The expectation of the ops VIRTCHNL_OP_ENABLE_QUEUES and VIRTCHNL_OP_DISABLE_QUEUES is that the queue map sent by the VF is taken into account when enabling/disabling queues in the VF VSI. This patch makes sure that happens. By breaking out the individual queue set up functions so that they can be called directly from the i40e_virtchnl_pf.c file, only the queues as specified by the queue bit map that accompanies the enable/disable queues ops will be handled. 
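In outline, the per-queue handling this patch introduces amounts to walking the VF-supplied queue bit map and driving each queue individually instead of starting or stopping every ring in the VSI. A simplified sketch of the Rx side follows (it mirrors the helper added in the diff below and is not a substitute for it; the Tx side is analogous, with an extra is_xdp argument):

	/* Sketch: enable or disable only the Rx queues named in the VF's
	 * bit map.  q_map is taken from the VIRTCHNL_OP_ENABLE_QUEUES /
	 * VIRTCHNL_OP_DISABLE_QUEUES message.
	 */
	static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi,
					 unsigned long q_map, bool enable)
	{
		struct i40e_pf *pf = vsi->back;
		int ret = 0;
		u16 q_id;

		for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
			/* flip the queue and wait for hardware to
			 * acknowledge the change
			 */
			ret = i40e_control_wait_rx_q(pf,
						     vsi->base_queue + q_id,
						     enable);
			if (ret)
				break;
		}
		return ret;
	}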
Signed-off-by: Harshitha Ramamurthy Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 3 + drivers/net/ethernet/intel/i40e/i40e_main.c | 40 +++++++++---- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 69 +++++++++++++++++++++- 3 files changed, 99 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 1d59ab6ca90f..7a80652e2500 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -987,6 +987,9 @@ void i40e_service_event_schedule(struct i40e_pf *pf); void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len); +int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp, + bool enable); +int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable); int i40e_vsi_start_rings(struct i40e_vsi *vsi); void i40e_vsi_stop_rings(struct i40e_vsi *vsi); void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0babde10fa15..b500bbf6c43f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4235,8 +4235,8 @@ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable) * @is_xdp: true if the queue is used for XDP * @enable: start or stop the queue **/ -static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, - bool is_xdp, bool enable) +int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, + bool is_xdp, bool enable) { int ret; @@ -4281,7 +4281,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) if (ret) break; } - return ret; } @@ -4320,9 +4319,9 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) * @pf_q: the PF queue to configure * @enable: start or stop the queue * - * This function enables or disables a single queue. Note that any delay - * required after the operation is expected to be handled by the caller of - * this function. + * This function enables or disables a single queue. Note that + * any delay required after the operation is expected to be + * handled by the caller of this function. **/ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) { @@ -4351,6 +4350,30 @@ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); } +/** + * i40e_control_wait_rx_q + * @pf: the PF structure + * @pf_q: queue being configured + * @enable: start or stop the rings + * + * This function enables or disables a single queue along with waiting + * for the change to finish. The caller of this function should handle + * the delays needed in the case of disabling queues. 
+ **/ +int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) +{ + int ret = 0; + + i40e_control_rx_q(pf, pf_q, enable); + + /* wait for the change to finish */ + ret = i40e_pf_rxq_wait(pf, pf_q, enable); + if (ret) + return ret; + + return ret; +} + /** * i40e_vsi_control_rx - Start or stop a VSI's rings * @vsi: the VSI being configured @@ -4363,10 +4386,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - i40e_control_rx_q(pf, pf_q, enable); - - /* wait for the change to finish */ - ret = i40e_pf_rxq_wait(pf, pf_q, enable); + ret = i40e_control_wait_rx_q(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d %sable timeout\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 2ceea63cc6cf..c6d24eaede18 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2153,6 +2153,51 @@ error_param: aq_ret); } +/** + * i40e_ctrl_vf_tx_rings + * @vsi: the SRIOV VSI being configured + * @q_map: bit map of the queues to be enabled + * @enable: start or stop the queue + **/ +static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, + bool enable) +{ + struct i40e_pf *pf = vsi->back; + int ret = 0; + u16 q_id; + + for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { + ret = i40e_control_wait_tx_q(vsi->seid, pf, + vsi->base_queue + q_id, + false /*is xdp*/, enable); + if (ret) + break; + } + return ret; +} + +/** + * i40e_ctrl_vf_rx_rings + * @vsi: the SRIOV VSI being configured + * @q_map: bit map of the queues to be enabled + * @enable: start or stop the queue + **/ +static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, + bool enable) +{ + struct i40e_pf *pf = vsi->back; + int ret = 0; + u16 q_id; + + for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { + ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, + enable); + if (ret) + break; + } + return ret; +} + /** * i40e_vc_enable_queues_msg * @vf: pointer to the VF info @@ -2185,8 +2230,17 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx])) + /* Use the queue bit map sent by the VF */ + if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, + true)) { + aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } + if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, + true)) { aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } /* need to start the rings for additional ADq VSI's as well */ if (vf->adq_enabled) { @@ -2234,8 +2288,17 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); - + /* Use the queue bit map sent by the VF */ + if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, + false)) { + aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } + if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, + false)) { + aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, -- cgit v1.2.3 From bf1099b5ea853bddaa0dc4b1c594a2fdbebaf4c9 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 20 Apr 2018 01:41:40 -0700 Subject: i40e: use %pI4b instead of byte swapping before dev_err Fix warnings 
regarding restricted __be32 type usage by strictly specifying the type of the ipv4 address being printed in the dev_err statement. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b500bbf6c43f..c8659fbd7111 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7213,8 +7213,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, if (mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { - mask->dst = be32_to_cpu(mask->dst); - dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n", + dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", &mask->dst); return I40E_ERR_CONFIG; } @@ -7224,8 +7223,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, if (mask->src == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { - mask->src = be32_to_cpu(mask->src); - dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n", + dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", &mask->src); return I40E_ERR_CONFIG; } -- cgit v1.2.3 From 541e11595cb27591a7b3f4327472f282f482a5b0 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 29 Apr 2018 10:56:09 +0300 Subject: mlxsw: spectrum: Extract mlxsw_sp_stp_spms_state() Instead of duplicating the decision regarding port forwarding state made by mlxsw_sp_port_vid_stp_set(), extract the decision-making into a new function and reuse. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 26 +++++++++++++------------- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + 2 files changed, 14 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ca38a30fbe91..7317fb8079d1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -441,29 +441,29 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); } -int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, - u8 state) +enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - enum mlxsw_reg_spms_state spms_state; - char *spms_pl; - int err; - switch (state) { case BR_STATE_FORWARDING: - spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; - break; + return MLXSW_REG_SPMS_STATE_FORWARDING; case BR_STATE_LEARNING: - spms_state = MLXSW_REG_SPMS_STATE_LEARNING; - break; + return MLXSW_REG_SPMS_STATE_LEARNING; case BR_STATE_LISTENING: /* fall-through */ case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_BLOCKING: - spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; - break; + return MLXSW_REG_SPMS_STATE_DISCARDING; default: BUG(); } +} + +int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, + u8 state) +{ + enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *spms_pl; + int err; spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); if (!spms_pl) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 
804d4d2c8031..4a519d8edec8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -364,6 +364,7 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, u32 maxrate); +enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state); int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, u8 state); int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable); -- cgit v1.2.3 From ea93c7b6083deebc026ed45b61e050c42322a2a6 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 29 Apr 2018 10:56:10 +0300 Subject: mlxsw: spectrum_switchdev: Publish two functions Publish the existing function mlxsw_sp_bridge_port_find(), and add another service accessor mlxsw_sp_bridge_port_stp_state(). Publish both in a new file spectrum_switchdev.h. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 9 ++++- .../ethernet/mellanox/mlxsw/spectrum_switchdev.h | 43 ++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index c11c9a635866..db4aea0f8996 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -50,6 +50,7 @@ #include #include "spectrum_router.h" +#include "spectrum_switchdev.h" #include "spectrum.h" #include "core.h" #include "reg.h" @@ -239,7 +240,7 @@ __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device, return NULL; } -static struct mlxsw_sp_bridge_port * +struct mlxsw_sp_bridge_port * mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, struct net_device *brport_dev) { @@ -2297,6 +2298,12 @@ static struct notifier_block mlxsw_sp_switchdev_notifier = { .notifier_call = mlxsw_sp_switchdev_event, }; +u8 +mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port) +{ + return bridge_port->stp_state; +} + static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h new file mode 100644 index 000000000000..bc44d5effc28 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 + * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +struct mlxsw_sp_bridge; +struct mlxsw_sp_bridge_port; + +struct mlxsw_sp_bridge_port * +mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, + struct net_device *brport_dev); + +u8 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port); -- cgit v1.2.3 From cda880de939ed81dbd310e2d6784451609fc5913 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 29 Apr 2018 10:56:11 +0300 Subject: mlxsw: spectrum: Register SPAN before switchdev Since switchdev events can trigger SPAN respin, it is necessary that the data structures are available. Register SPAN first, with a commentary on what the dependencies are. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 7317fb8079d1..94132f6cec61 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3666,6 +3666,15 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_lag_init; } + /* Initialize SPAN before router and switchdev, so that those components + * can call mlxsw_sp_span_respin(). + */ + err = mlxsw_sp_span_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); + goto err_span_init; + } + err = mlxsw_sp_switchdev_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); @@ -3684,15 +3693,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_afa_init; } - err = mlxsw_sp_span_init(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); - goto err_span_init; - } - - /* Initialize router after SPAN is initialized, so that the FIB and - * neighbor event handlers can issue SPAN respin. 
- */ err = mlxsw_sp_router_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); @@ -3739,14 +3739,14 @@ err_acl_init: err_netdev_notifier: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: - mlxsw_sp_span_fini(mlxsw_sp); -err_span_init: mlxsw_sp_afa_fini(mlxsw_sp); err_afa_init: mlxsw_sp_counter_pool_fini(mlxsw_sp); err_counter_pool_init: mlxsw_sp_switchdev_fini(mlxsw_sp); err_switchdev_init: + mlxsw_sp_span_fini(mlxsw_sp); +err_span_init: mlxsw_sp_lag_fini(mlxsw_sp); err_lag_init: mlxsw_sp_buffers_fini(mlxsw_sp); @@ -3768,10 +3768,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_acl_fini(mlxsw_sp); unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); mlxsw_sp_router_fini(mlxsw_sp); - mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); + mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); -- cgit v1.2.3 From c520bc6986472621469f9dc1cec1a5dbbb4769bf Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 29 Apr 2018 10:56:12 +0300 Subject: mlxsw: Respin SPAN on switchdev events Changes to switchdev artifact can make a SPAN entry offloadable or unoffloadable. To that end: - Listen to SWITCHDEV_FDB_*_TO_BRIDGE notifications in addition to the *_TO_DEVICE ones, to catch whatever activity is sent to the bridge (likely by mlxsw itself). On each FDB notification, respin SPAN to reconcile it with the FDB changes. - Also respin on switchdev port attribute changes (which currently covers changes to STP state of ports) and port object additions and removals. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 63 ++++++++++++++++++++-- 1 file changed, 59 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index db4aea0f8996..1af99fe5fd32 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -49,6 +49,7 @@ #include #include +#include "spectrum_span.h" #include "spectrum_router.h" #include "spectrum_switchdev.h" #include "spectrum.h" @@ -923,6 +924,9 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, break; } + if (switchdev_trans_ph_commit(trans)) + mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); + return err; } @@ -1647,18 +1651,57 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, } } +struct mlxsw_sp_span_respin_work { + struct work_struct work; + struct mlxsw_sp *mlxsw_sp; +}; + +static void mlxsw_sp_span_respin_work(struct work_struct *work) +{ + struct mlxsw_sp_span_respin_work *respin_work = + container_of(work, struct mlxsw_sp_span_respin_work, work); + + rtnl_lock(); + mlxsw_sp_span_respin(respin_work->mlxsw_sp); + rtnl_unlock(); + kfree(respin_work); +} + +static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_span_respin_work *respin_work; + + respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC); + if (!respin_work) + return; + + INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work); + respin_work->mlxsw_sp = mlxsw_sp; + + mlxsw_core_schedule_work(&respin_work->work); +} + static int mlxsw_sp_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans) { struct 
mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + const struct switchdev_obj_port_vlan *vlan; int err = 0; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, - SWITCHDEV_OBJ_PORT_VLAN(obj), - trans); + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans); + + if (switchdev_trans_ph_commit(trans)) { + /* The event is emitted before the changes are actually + * applied to the bridge. Therefore schedule the respin + * call for later, so that the respin logic sees the + * updated bridge state. + */ + mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); + } break; case SWITCHDEV_OBJ_ID_PORT_MDB: err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, @@ -1809,6 +1852,8 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, break; } + mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); + return err; } @@ -2236,8 +2281,16 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work) fdb_info = &switchdev_work->fdb_info; mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); break; + case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_DEL_TO_BRIDGE: + /* These events are only used to potentially update an existing + * SPAN mirror. + */ + break; } + mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); + out: rtnl_unlock(); kfree(switchdev_work->fdb_info.addr); @@ -2266,7 +2319,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, switch (event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ - case SWITCHDEV_FDB_DEL_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_DEL_TO_BRIDGE: memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); -- cgit v1.2.3 From 946a11e7408e52a6d9e5f3187c32b6c2a5f88835 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 29 Apr 2018 10:56:13 +0300 Subject: mlxsw: spectrum_span: Allow bridge for gretap mirror When handling mirroring to a gretap or ip6gretap netdevice in mlxsw, the underlay address (i.e. the remote address of the tunnel) may be routed to a bridge. In that case, look up the resolved neighbor Ethernet address in that bridge's FDB. Then configure the offload to direct the mirrored traffic to that port, possibly with tagging. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_span.c | 95 ++++++++++++++++++++-- .../net/ethernet/mellanox/mlxsw/spectrum_span.h | 1 + 2 files changed, 90 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 65a77708ff61..cd9071ee19ad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -32,6 +32,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ +#include #include #include #include @@ -39,8 +40,9 @@ #include #include "spectrum.h" -#include "spectrum_span.h" #include "spectrum_ipip.h" +#include "spectrum_span.h" +#include "spectrum_switchdev.h" int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) { @@ -167,6 +169,72 @@ mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp) return 0; } +static struct net_device * +mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev, + unsigned char *dmac, + u16 *p_vid) +{ + struct bridge_vlan_info vinfo; + struct net_device *edev; + u16 pvid; + + if (WARN_ON(br_vlan_get_pvid(br_dev, &pvid))) + return NULL; + if (!pvid) + return NULL; + + edev = br_fdb_find_port(br_dev, dmac, pvid); + if (!edev) + return NULL; + + if (br_vlan_get_info(edev, pvid, &vinfo)) + return NULL; + if (!(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)) + *p_vid = pvid; + return edev; +} + +static struct net_device * +mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev, + unsigned char *dmac) +{ + return br_fdb_find_port(br_dev, dmac, 0); +} + +static struct net_device * +mlxsw_sp_span_entry_bridge(const struct net_device *br_dev, + unsigned char dmac[ETH_ALEN], + u16 *p_vid) +{ + struct mlxsw_sp_bridge_port *bridge_port; + enum mlxsw_reg_spms_state spms_state; + struct mlxsw_sp_port *port; + struct net_device *dev; + u8 stp_state; + + if (br_vlan_enabled(br_dev)) + dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid); + else + dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac); + if (!dev) + return NULL; + + port = mlxsw_sp_port_dev_lower_find(dev); + if (!port) + return NULL; + + bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev); + if (!bridge_port) + return NULL; + + stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port); + spms_state = mlxsw_sp_stp_spms_state(stp_state); + if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING) + return NULL; + + return dev; +} + static __maybe_unused int mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, union mlxsw_sp_l3addr saddr, @@ -177,13 +245,22 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, struct mlxsw_sp_span_parms *sparmsp) { unsigned char dmac[ETH_ALEN]; + u16 vid = 0; if (mlxsw_sp_l3addr_is_zero(gw)) gw = daddr; - if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) || - mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac)) - return mlxsw_sp_span_entry_unoffloadable(sparmsp); + if (!l3edev || mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac)) + goto unoffloadable; + + if (netif_is_bridge_master(l3edev)) { + l3edev = mlxsw_sp_span_entry_bridge(l3edev, dmac, &vid); + if (!l3edev) + goto unoffloadable; + } + + if (!mlxsw_sp_port_dev_check(l3edev)) + goto unoffloadable; sparmsp->dest_port = netdev_priv(l3edev); sparmsp->ttl = ttl; @@ -191,7 +268,11 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN); sparmsp->saddr = saddr; sparmsp->daddr = daddr; + sparmsp->vid = vid; return 0; + +unoffloadable: + return mlxsw_sp_span_entry_unoffloadable(sparmsp); } #if IS_ENABLED(CONFIG_NET_IPGRE) @@ -268,9 +349,10 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. 
*/ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, - sparms.dmac, false); + sparms.dmac, !!sparms.vid); mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl, sparms.ttl, sparms.smac, be32_to_cpu(sparms.saddr.addr4), @@ -368,9 +450,10 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. */ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, - sparms.dmac, false); + sparms.dmac, !!sparms.vid); mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac, sparms.saddr.addr6, sparms.daddr.addr6); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index 4b87ec20e658..14a6de904db1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -63,6 +63,7 @@ struct mlxsw_sp_span_parms { unsigned char smac[ETH_ALEN]; union mlxsw_sp_l3addr daddr; union mlxsw_sp_l3addr saddr; + u16 vid; }; struct mlxsw_sp_span_entry_ops; -- cgit v1.2.3 From bb9094161b2320e431a5d8a7b9c3dc632bc92ae6 Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Mon, 30 Apr 2018 10:16:17 +0300 Subject: net/mlx5e: Move defines out of ipsec code The defines are not IPSEC specific. Signed-off-by: Ilya Lesokhin Signed-off-by: Boris Pismenny Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 3 --- drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 5 +---- drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h | 2 ++ 4 files changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 56c748ca2a09..66ecb1c81002 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -55,6 +55,9 @@ struct page_pool; +#define MLX5E_METADATA_ETHER_TYPE (0x8CE4) +#define MLX5E_METADATA_ETHER_LEN 8 + #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 1198fc1eba4c..93bf10e6508c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -45,9 +45,6 @@ #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L -#define MLX5E_METADATA_ETHER_TYPE (0x8CE4) -#define MLX5E_METADATA_ETHER_LEN 8 - struct mlx5e_priv; struct mlx5e_ipsec_sw_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 0f5da499a223..3c4f1f326e13 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -43,9 +43,6 @@ #include "fpga/sdk.h" #include "fpga/core.h" -#define SBU_QP_QUEUE_SIZE 8 -#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000) - enum 
mlx5_fpga_ipsec_cmd_status { MLX5_FPGA_IPSEC_CMD_PENDING, MLX5_FPGA_IPSEC_CMD_SEND_FAIL, @@ -258,7 +255,7 @@ static int mlx5_fpga_ipsec_cmd_wait(void *ctx) { struct mlx5_fpga_ipsec_cmd_context *context = ctx; unsigned long timeout = - msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC); + msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC); int res; res = wait_for_completion_timeout(&context->complete, timeout); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h index baa537e54a49..a0573cc2fc9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h @@ -41,6 +41,8 @@ * DOC: Innova SDK * This header defines the in-kernel API for Innova FPGA client drivers. */ +#define SBU_QP_QUEUE_SIZE 8 +#define MLX5_FPGA_CMD_TIMEOUT_MSEC (60 * 1000) enum mlx5_fpga_access_type { MLX5_FPGA_ACCESS_TYPE_I2C = 0x0, -- cgit v1.2.3 From 1ae1732284895498b7119e42323cf12821423e6d Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Mon, 30 Apr 2018 10:16:18 +0300 Subject: net/mlx5: Accel, Add TLS tx offload interface Add routines for manipulating TLS TX offload contexts. In Innova TLS, TLS contexts are added or deleted via a command message over the SBU connection. The HW then sends a response message over the same connection. Add implementation for Innova TLS (FPGA-based) hardware. These routines will be used by the TLS offload support in a later patch. mlx5/accel is a middle acceleration layer to allow mlx5e and other ULPs to work directly with mlx5_core rather than Innova FPGA or other mlx5 acceleration providers. In the future, when IPSec/TLS or any other acceleration gets integrated into ConnectX chip, mlx5/accel layer will provide the integrated acceleration, rather than the Innova one. Signed-off-by: Ilya Lesokhin Signed-off-by: Boris Pismenny Acked-by: Saeed Mahameed Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 4 +- .../net/ethernet/mellanox/mlx5/core/accel/tls.c | 71 +++ .../net/ethernet/mellanox/mlx5/core/accel/tls.h | 86 ++++ .../net/ethernet/mellanox/mlx5/core/fpga/core.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 562 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h | 68 +++ drivers/net/ethernet/mellanox/mlx5/core/main.c | 11 + include/linux/mlx5/mlx5_ifc.h | 16 - include/linux/mlx5/mlx5_ifc_fpga.h | 77 +++ 9 files changed, 878 insertions(+), 18 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index c805769d92a9..9989e5265a45 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -8,10 +8,10 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \ diag/fs_tracepoint.o -mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o +mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ - fpga/ipsec.o + fpga/ipsec.o fpga/tls.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c new file mode 100644 index 000000000000..77ac19f38cbe --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include + +#include "accel/tls.h" +#include "mlx5_core.h" +#include "fpga/tls.h" + +int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid) +{ + return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, p_swid); +} + +void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) +{ + mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL); +} + +bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) +{ + return mlx5_fpga_is_tls_device(mdev); +} + +u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) +{ + return mlx5_fpga_tls_device_caps(mdev); +} + +int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) +{ + return mlx5_fpga_tls_init(mdev); +} + +void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) +{ + mlx5_fpga_tls_cleanup(mdev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h new file mode 100644 index 000000000000..6f9c9f446ecc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef __MLX5_ACCEL_TLS_H__ +#define __MLX5_ACCEL_TLS_H__ + +#include +#include + +#ifdef CONFIG_MLX5_ACCEL + +enum { + MLX5_ACCEL_TLS_TX = BIT(0), + MLX5_ACCEL_TLS_RX = BIT(1), + MLX5_ACCEL_TLS_V12 = BIT(2), + MLX5_ACCEL_TLS_V13 = BIT(3), + MLX5_ACCEL_TLS_LRO = BIT(4), + MLX5_ACCEL_TLS_IPV6 = BIT(5), + MLX5_ACCEL_TLS_AES_GCM128 = BIT(30), + MLX5_ACCEL_TLS_AES_GCM256 = BIT(31), +}; + +struct mlx5_ifc_tls_flow_bits { + u8 src_port[0x10]; + u8 dst_port[0x10]; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; + u8 ipv6[0x1]; + u8 direction_sx[0x1]; + u8 reserved_at_2[0x1e]; +}; + +int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid); +void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid); +bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); +u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); +int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); +void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); + +#else + +static inline int +mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid) { return 0; } +static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { } +static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } +static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } +static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; } +static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { } + +#endif + +#endif /* __MLX5_ACCEL_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h index 82405ed84725..3e2355c8df3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h @@ -53,6 +53,7 @@ struct mlx5_fpga_device { } conn_res; struct mlx5_fpga_ipsec *ipsec; + struct mlx5_fpga_tls *tls; }; #define mlx5_fpga_dbg(__adev, format, ...) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c new file mode 100644 index 000000000000..21048013826c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -0,0 +1,562 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include "fpga/tls.h" +#include "fpga/cmd.h" +#include "fpga/sdk.h" +#include "fpga/core.h" +#include "accel/tls.h" + +struct mlx5_fpga_tls_command_context; + +typedef void (*mlx5_fpga_tls_command_complete) + (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev, + struct mlx5_fpga_tls_command_context *ctx, + struct mlx5_fpga_dma_buf *resp); + +struct mlx5_fpga_tls_command_context { + struct list_head list; + /* There is no guarantee on the order between the TX completion + * and the command response. + * The TX completion is going to touch cmd->buf even in + * the case of successful transmission. + * So instead of requiring separate allocations for cmd + * and cmd->buf we've decided to use a reference counter + */ + refcount_t ref; + struct mlx5_fpga_dma_buf buf; + mlx5_fpga_tls_command_complete complete; +}; + +static void +mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx) +{ + if (refcount_dec_and_test(&ctx->ref)) + kfree(ctx); +} + +static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev, + struct mlx5_fpga_dma_buf *resp) +{ + struct mlx5_fpga_conn *conn = fdev->tls->conn; + struct mlx5_fpga_tls_command_context *ctx; + struct mlx5_fpga_tls *tls = fdev->tls; + unsigned long flags; + + spin_lock_irqsave(&tls->pending_cmds_lock, flags); + ctx = list_first_entry(&tls->pending_cmds, + struct mlx5_fpga_tls_command_context, list); + list_del(&ctx->list); + spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); + ctx->complete(conn, fdev, ctx, resp); +} + +static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_dma_buf *buf, + u8 status) +{ + struct mlx5_fpga_tls_command_context *ctx = + container_of(buf, struct mlx5_fpga_tls_command_context, buf); + + mlx5_fpga_tls_put_command_ctx(ctx); + + if (unlikely(status)) + mlx5_fpga_tls_cmd_complete(fdev, NULL); +} + +static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev, + struct mlx5_fpga_tls_command_context *cmd, + mlx5_fpga_tls_command_complete complete) +{ + struct mlx5_fpga_tls *tls = fdev->tls; + unsigned long flags; + int ret; + + refcount_set(&cmd->ref, 2); + cmd->complete = complete; + cmd->buf.complete = mlx5_fpga_cmd_send_complete; + + spin_lock_irqsave(&tls->pending_cmds_lock, flags); + /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock + * to make sure commands are inserted to the tls->pending_cmds list + * and the command QP in the same order. 
+ */ + ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf); + if (likely(!ret)) + list_add_tail(&cmd->list, &tls->pending_cmds); + else + complete(tls->conn, fdev, cmd, NULL); + spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); +} + +/* Start of context identifiers range (inclusive) */ +#define SWID_START 0 +/* End of context identifiers range (exclusive) */ +#define SWID_END BIT(24) + +static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, + void *ptr) +{ + int ret; + + /* TLS metadata format is 1 byte for syndrome followed + * by 3 bytes of swid (software ID) + * swid must not exceed 3 bytes. + * See tls_rxtx.c:insert_pet() for details + */ + BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); + + idr_preload(GFP_KERNEL); + spin_lock_irq(idr_spinlock); + ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); + spin_unlock_irq(idr_spinlock); + idr_preload_end(); + + return ret; +} + +static void mlx5_fpga_tls_release_swid(struct idr *idr, + spinlock_t *idr_spinlock, u32 swid) +{ + unsigned long flags; + + spin_lock_irqsave(idr_spinlock, flags); + idr_remove(idr, swid); + spin_unlock_irqrestore(idr_spinlock, flags); +} + +struct mlx5_teardown_stream_context { + struct mlx5_fpga_tls_command_context cmd; + u32 swid; +}; + +static void +mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_tls_command_context *cmd, + struct mlx5_fpga_dma_buf *resp) +{ + struct mlx5_teardown_stream_context *ctx = + container_of(cmd, struct mlx5_teardown_stream_context, cmd); + + if (resp) { + u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); + + if (syndrome) + mlx5_fpga_err(fdev, + "Teardown stream failed with syndrome = %d", + syndrome); + else + mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, + &fdev->tls->idr_spinlock, + ctx->swid); + } + mlx5_fpga_tls_put_command_ctx(cmd); +} + +static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) +{ + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow, + MLX5_BYTE_OFF(tls_flow, ipv6)); + + MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6)); + MLX5_SET(tls_cmd, cmd, direction_sx, + MLX5_GET(tls_flow, flow, direction_sx)); +} + +void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow, + u32 swid, gfp_t flags) +{ + struct mlx5_teardown_stream_context *ctx; + struct mlx5_fpga_dma_buf *buf; + void *cmd; + + ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags); + if (!ctx) + return; + + buf = &ctx->cmd.buf; + cmd = (ctx + 1); + MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); + MLX5_SET(tls_cmd, cmd, swid, swid); + + mlx5_fpga_tls_flow_to_cmd(flow, cmd); + kfree(flow); + + buf->sg[0].data = cmd; + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; + + ctx->swid = swid; + mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, + mlx5_fpga_tls_teardown_completion); +} + +void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, + gfp_t flags) +{ + struct mlx5_fpga_tls *tls = mdev->fpga->tls; + void *flow; + + rcu_read_lock(); + flow = idr_find(&tls->tx_idr, swid); + rcu_read_unlock(); + + if (!flow) { + mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", + swid); + return; + } + + mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); +} + +enum mlx5_fpga_setup_stream_status { + MLX5_FPGA_CMD_PENDING, + MLX5_FPGA_CMD_SEND_FAILED, + MLX5_FPGA_CMD_RESPONSE_RECEIVED, + MLX5_FPGA_CMD_ABANDONED, +}; + +struct mlx5_setup_stream_context { + struct mlx5_fpga_tls_command_context cmd; + atomic_t status; + u32 syndrome; + struct 
completion comp; +}; + +static void +mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_tls_command_context *cmd, + struct mlx5_fpga_dma_buf *resp) +{ + struct mlx5_setup_stream_context *ctx = + container_of(cmd, struct mlx5_setup_stream_context, cmd); + int status = MLX5_FPGA_CMD_SEND_FAILED; + void *tls_cmd = ctx + 1; + + /* If we failed to send to command resp == NULL */ + if (resp) { + ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); + status = MLX5_FPGA_CMD_RESPONSE_RECEIVED; + } + + status = atomic_xchg_release(&ctx->status, status); + if (likely(status != MLX5_FPGA_CMD_ABANDONED)) { + complete(&ctx->comp); + return; + } + + mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n", + ctx->syndrome); + + if (!ctx->syndrome) { + /* The process was killed while waiting for the context to be + * added, and the add completed successfully. + * We need to destroy the HW context, and we can't can't reuse + * the command context because we might not have received + * the tx completion yet. + */ + mlx5_fpga_tls_del_tx_flow(fdev->mdev, + MLX5_GET(tls_cmd, tls_cmd, swid), + GFP_ATOMIC); + } + + mlx5_fpga_tls_put_command_ctx(cmd); +} + +static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev, + struct mlx5_setup_stream_context *ctx) +{ + struct mlx5_fpga_dma_buf *buf; + void *cmd = ctx + 1; + int status, ret = 0; + + buf = &ctx->cmd.buf; + buf->sg[0].data = cmd; + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; + MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM); + + init_completion(&ctx->comp); + atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING); + ctx->syndrome = -1; + + mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, + mlx5_fpga_tls_setup_completion); + wait_for_completion_killable(&ctx->comp); + + status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED); + if (unlikely(status == MLX5_FPGA_CMD_PENDING)) + /* ctx is going to be released in mlx5_fpga_tls_setup_completion */ + return -EINTR; + + if (unlikely(ctx->syndrome)) + ret = -ENOMEM; + + mlx5_fpga_tls_put_command_ctx(&ctx->cmd); + return ret; +} + +static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg, + struct mlx5_fpga_dma_buf *buf) +{ + struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg; + + mlx5_fpga_tls_cmd_complete(fdev, buf); +} + +bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev) +{ + if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga)) + return false; + + if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) != + MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX) + return false; + + if (MLX5_CAP_FPGA(mdev, sandbox_product_id) != + MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS) + return false; + + if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0) + return false; + + return true; +} + +static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev, + u32 *p_caps) +{ + int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap); + u32 caps = 0; + void *buf; + + buf = kzalloc(cap_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf); + if (err) + goto out; + + if (MLX5_GET(tls_extended_cap, buf, tx)) + caps |= MLX5_ACCEL_TLS_TX; + if (MLX5_GET(tls_extended_cap, buf, rx)) + caps |= MLX5_ACCEL_TLS_RX; + if (MLX5_GET(tls_extended_cap, buf, tls_v12)) + caps |= MLX5_ACCEL_TLS_V12; + if (MLX5_GET(tls_extended_cap, buf, tls_v13)) + caps |= MLX5_ACCEL_TLS_V13; + if (MLX5_GET(tls_extended_cap, buf, lro)) + caps |= MLX5_ACCEL_TLS_LRO; + if (MLX5_GET(tls_extended_cap, buf, ipv6)) + caps |= MLX5_ACCEL_TLS_IPV6; + + if 
(MLX5_GET(tls_extended_cap, buf, aes_gcm_128)) + caps |= MLX5_ACCEL_TLS_AES_GCM128; + if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256)) + caps |= MLX5_ACCEL_TLS_AES_GCM256; + + *p_caps = caps; + err = 0; +out: + kfree(buf); + return err; +} + +int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) +{ + struct mlx5_fpga_device *fdev = mdev->fpga; + struct mlx5_fpga_conn_attr init_attr = {0}; + struct mlx5_fpga_conn *conn; + struct mlx5_fpga_tls *tls; + int err = 0; + + if (!mlx5_fpga_is_tls_device(mdev) || !fdev) + return 0; + + tls = kzalloc(sizeof(*tls), GFP_KERNEL); + if (!tls) + return -ENOMEM; + + err = mlx5_fpga_tls_get_caps(fdev, &tls->caps); + if (err) + goto error; + + if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 | + MLX5_ACCEL_TLS_AES_GCM128))) { + err = -ENOTSUPP; + goto error; + } + + init_attr.rx_size = SBU_QP_QUEUE_SIZE; + init_attr.tx_size = SBU_QP_QUEUE_SIZE; + init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb; + init_attr.cb_arg = fdev; + conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr); + if (IS_ERR(conn)) { + err = PTR_ERR(conn); + mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n", + err); + goto error; + } + + tls->conn = conn; + spin_lock_init(&tls->pending_cmds_lock); + INIT_LIST_HEAD(&tls->pending_cmds); + + idr_init(&tls->tx_idr); + spin_lock_init(&tls->idr_spinlock); + fdev->tls = tls; + return 0; + +error: + kfree(tls); + return err; +} + +void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev) +{ + struct mlx5_fpga_device *fdev = mdev->fpga; + + if (!fdev || !fdev->tls) + return; + + mlx5_fpga_sbu_conn_destroy(fdev->tls->conn); + kfree(fdev->tls); + fdev->tls = NULL; +} + +static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd, + struct tls_crypto_info *info, + __be64 *rcd_sn) +{ + struct tls12_crypto_info_aes_gcm_128 *crypto_info = + (struct tls12_crypto_info_aes_gcm_128 *)info; + + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq, + TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); + + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv), + crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key), + crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); + + /* in AES-GCM 128 we need to write the key twice */ + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) + + TLS_CIPHER_AES_GCM_128_KEY_SIZE, + crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); + + MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128); +} + +static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps, + struct tls_crypto_info *crypto_info) +{ + __be64 rcd_sn; + + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: + if (!(caps & MLX5_ACCEL_TLS_AES_GCM128)) + return -EINVAL; + mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, u32 swid, + u32 tcp_sn) +{ + u32 caps = mlx5_fpga_tls_device_caps(mdev); + struct mlx5_setup_stream_context *ctx; + int ret = -ENOMEM; + size_t cmd_size; + void *cmd; + + cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx); + ctx = kzalloc(cmd_size, GFP_KERNEL); + if (!ctx) + goto out; + + cmd = ctx + 1; + ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info); + if (ret) + goto free_ctx; + + mlx5_fpga_tls_flow_to_cmd(flow, cmd); + + MLX5_SET(tls_cmd, cmd, swid, swid); + MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn); + + return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx); + +free_ctx: + kfree(ctx); +out: + 
return ret; +} + +int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid) +{ + struct mlx5_fpga_tls *tls = mdev->fpga->tls; + int ret = -ENOMEM; + u32 swid; + + ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow); + if (ret < 0) + return ret; + + swid = ret; + MLX5_SET(tls_flow, flow, direction_sx, 1); + + ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, + start_offload_tcp_sn); + if (ret && ret != -EINTR) + goto free_swid; + + *p_swid = swid; + return 0; +free_swid: + mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid); + + return ret; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h new file mode 100644 index 000000000000..800a214e4e49 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef __MLX5_FPGA_TLS_H__ +#define __MLX5_FPGA_TLS_H__ + +#include + +#include +#include "fpga/core.h" + +struct mlx5_fpga_tls { + struct list_head pending_cmds; + spinlock_t pending_cmds_lock; /* Protects pending_cmds */ + u32 caps; + struct mlx5_fpga_conn *conn; + + struct idr tx_idr; + spinlock_t idr_spinlock; /* protects the IDR */ +}; + +int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid); + +void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, + gfp_t flags); + +bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); +int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); +void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev); + +static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) +{ + return mdev->fpga->tls->caps; +} + +#endif /* __MLX5_FPGA_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 63a8ea31601c..b865597630ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -60,6 +60,7 @@ #include "fpga/core.h" #include "fpga/ipsec.h" #include "accel/ipsec.h" +#include "accel/tls.h" #include "lib/clock.h" MODULE_AUTHOR("Eli Cohen "); @@ -1190,6 +1191,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_ipsec_start; } + err = mlx5_accel_tls_init(dev); + if (err) { + dev_err(&pdev->dev, "TLS device start failed %d\n", err); + goto err_tls_start; + } + err = mlx5_init_fs(dev); if (err) { dev_err(&pdev->dev, "Failed to init flow steering\n"); @@ -1231,6 +1238,9 @@ err_sriov: mlx5_cleanup_fs(dev); err_fs: + mlx5_accel_tls_cleanup(dev); + +err_tls_start: mlx5_accel_ipsec_cleanup(dev); err_ipsec_start: @@ -1306,6 +1316,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); mlx5_accel_ipsec_cleanup(dev); + mlx5_accel_tls_cleanup(dev); mlx5_fpga_device_stop(dev); mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 1aad455538f4..b8918a1da11f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -356,22 +356,6 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 reserved_at_6[0x1a]; }; -struct mlx5_ifc_ipv4_layout_bits { - u8 reserved_at_0[0x60]; - - u8 ipv4[0x20]; -}; - -struct mlx5_ifc_ipv6_layout_bits { - u8 ipv6[16][0x8]; -}; - -union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { - struct mlx5_ifc_ipv6_layout_bits ipv6_layout; - struct mlx5_ifc_ipv4_layout_bits ipv4_layout; - u8 reserved_at_0[0x80]; -}; - struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index ec052491ba3d..193091537cb6 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -32,12 +32,29 @@ #ifndef MLX5_IFC_FPGA_H #define MLX5_IFC_FPGA_H +struct mlx5_ifc_ipv4_layout_bits { + u8 reserved_at_0[0x60]; + + u8 ipv4[0x20]; +}; + +struct mlx5_ifc_ipv6_layout_bits { + u8 ipv6[16][0x8]; +}; + +union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { + struct mlx5_ifc_ipv6_layout_bits ipv6_layout; + struct mlx5_ifc_ipv4_layout_bits ipv4_layout; + u8 reserved_at_0[0x80]; +}; + enum { MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9, }; enum { MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2, + 
MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3, }; struct mlx5_ifc_fpga_shell_caps_bits { @@ -370,6 +387,27 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_tls_extended_cap_bits { + u8 aes_gcm_128[0x1]; + u8 aes_gcm_256[0x1]; + u8 reserved_at_2[0x1e]; + u8 reserved_at_20[0x20]; + u8 context_capacity_total[0x20]; + u8 context_capacity_rx[0x20]; + u8 context_capacity_tx[0x20]; + u8 reserved_at_a0[0x10]; + u8 tls_counter_size[0x10]; + u8 tls_counters_addr_low[0x20]; + u8 tls_counters_addr_high[0x20]; + u8 rx[0x1]; + u8 tx[0x1]; + u8 tls_v12[0x1]; + u8 tls_v13[0x1]; + u8 lro[0x1]; + u8 ipv6[0x1]; + u8 reserved_at_106[0x1a]; +}; + struct mlx5_ifc_ipsec_extended_cap_bits { u8 encapsulation[0x20]; @@ -519,4 +557,43 @@ struct mlx5_ifc_fpga_ipsec_sa { __be16 reserved2; } __packed; +enum fpga_tls_cmds { + CMD_SETUP_STREAM = 0x1001, + CMD_TEARDOWN_STREAM = 0x1002, +}; + +#define MLX5_TLS_1_2 (0) + +#define MLX5_TLS_ALG_AES_GCM_128 (0) +#define MLX5_TLS_ALG_AES_GCM_256 (1) + +struct mlx5_ifc_tls_cmd_bits { + u8 command_type[0x20]; + u8 ipv6[0x1]; + u8 direction_sx[0x1]; + u8 tls_version[0x2]; + u8 reserved[0x1c]; + u8 swid[0x20]; + u8 src_port[0x10]; + u8 dst_port[0x10]; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; + u8 tls_rcd_sn[0x40]; + u8 tcp_sn[0x20]; + u8 tls_implicit_iv[0x20]; + u8 tls_xor_iv[0x40]; + u8 encryption_key[0x100]; + u8 alg[4]; + u8 reserved2[0x1c]; + u8 reserved3[0x4a0]; +}; + +struct mlx5_ifc_tls_resp_bits { + u8 syndrome[0x20]; + u8 stream_id[0x20]; + u8 reserverd[0x40]; +}; + +#define MLX5_TLS_COMMAND_SIZE (0x100) + #endif /* MLX5_IFC_FPGA_H */ -- cgit v1.2.3 From c83294b9efa51e19b582a5962e1366196f684dba Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Mon, 30 Apr 2018 10:16:19 +0300 Subject: net/mlx5e: TLS, Add Innova TLS TX support Add NETIF_F_HW_TLS_TX capability and expose tlsdev_ops to work with the TLS generic NIC offload infrastructure. The NETIF_F_HW_TLS_TX capability will be added in the next patch. Signed-off-by: Ilya Lesokhin Signed-off-by: Boris Pismenny Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 11 ++ drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 + .../net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 173 +++++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 65 ++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 + 5 files changed, 254 insertions(+) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 12257034131e..ee6684779d11 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -86,3 +86,14 @@ config MLX5_EN_IPSEC Build support for IPsec cryptography-offload accelaration in the NIC. Note: Support for hardware with this capability needs to be selected for this option to become available. + +config MLX5_EN_TLS + bool "TLS cryptography-offload accelaration" + depends on MLX5_CORE_EN + depends on TLS_DEVICE + depends on MLX5_ACCEL + default n + ---help--- + Build support for TLS cryptography-offload accelaration in the NIC. + Note: Support for hardware with this capability needs to be selected + for this option to become available. 
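(Illustrative aside, not part of the patch: a kernel configuration fragment that satisfies the dependencies listed in the Kconfig entry above might look as follows; the module vs. built-in choice for MLX5_CORE is an assumption, and exact option availability depends on the kernel providing the generic TLS_DEVICE infrastructure.)

    CONFIG_TLS=y
    CONFIG_TLS_DEVICE=y
    CONFIG_MLX5_CORE=m
    CONFIG_MLX5_CORE_EN=y
    CONFIG_MLX5_ACCEL=y
    CONFIG_MLX5_EN_TLS=y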
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9989e5265a45..50872ed30c0b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -28,4 +28,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o +mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o + CFLAGS_tracepoint.o := -I$(src) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c new file mode 100644 index 000000000000..38d88108a55a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include +#include "en_accel/tls.h" +#include "accel/tls.h" + +static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk) +{ + struct inet_sock *inet = inet_sk(sk); + + MLX5_SET(tls_flow, flow, ipv6, 0); + memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); + memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4), + &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + MLX5_SET(tls_flow, flow, ipv6, 1); + memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6), + &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); +} +#endif + +static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk) +{ + struct inet_sock *inet = inet_sk(sk); + + memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport, + MLX5_FLD_SZ_BYTES(tls_flow, src_port)); + memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport, + MLX5_FLD_SZ_BYTES(tls_flow, dst_port)); +} + +static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps) +{ + switch (sk->sk_family) { + case AF_INET: + mlx5e_tls_set_ipv4_flow(flow, sk); + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + if (!sk->sk_ipv6only && + ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { + mlx5e_tls_set_ipv4_flow(flow, sk); + break; + } + if (!(caps & MLX5_ACCEL_TLS_IPV6)) + goto error_out; + + mlx5e_tls_set_ipv6_flow(flow, sk); + break; +#endif + default: + goto error_out; + } + + mlx5e_tls_set_flow_tcp_ports(flow, sk); + return 0; +error_out: + return -EINVAL; +} + +static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct mlx5_core_dev *mdev = priv->mdev; + u32 caps = mlx5_accel_tls_device_caps(mdev); + int ret = -ENOMEM; + void *flow; + + if (direction != TLS_OFFLOAD_CTX_DIR_TX) + return -EINVAL; + + flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL); + if (!flow) + return ret; + + ret = mlx5e_tls_set_flow(flow, sk, caps); + if (ret) + goto free_flow; + + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { + struct mlx5e_tls_offload_context *tx_ctx = + mlx5e_get_tls_tx_context(tls_ctx); + u32 swid; + + ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, &swid); + if (ret < 0) + goto free_flow; + + tx_ctx->swid = htonl(swid); + tx_ctx->expected_seq = start_offload_tcp_sn; + } + + return 0; +free_flow: + kfree(flow); + return ret; +} + +static void mlx5e_tls_del(struct net_device *netdev, + struct tls_context *tls_ctx, + enum tls_offload_ctx_dir direction) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { + u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid); + + mlx5_accel_tls_del_tx_flow(priv->mdev, swid); + } else { + netdev_err(netdev, "unsupported direction %d\n", direction); + } +} + +static const struct tlsdev_ops mlx5e_tls_ops = { + .tls_dev_add = mlx5e_tls_add, + .tls_dev_del = mlx5e_tls_del, +}; + +void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; 
+ + if (!mlx5_accel_is_tls_device(priv->mdev)) + return; + + netdev->tlsdev_ops = &mlx5e_tls_ops; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h new file mode 100644 index 000000000000..f7216b9b98e2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ +#ifndef __MLX5E_TLS_H__ +#define __MLX5E_TLS_H__ + +#ifdef CONFIG_MLX5_EN_TLS + +#include +#include "en.h" + +struct mlx5e_tls_offload_context { + struct tls_offload_context base; + u32 expected_seq; + __be32 swid; +}; + +static inline struct mlx5e_tls_offload_context * +mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) +{ + BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) > + TLS_OFFLOAD_CONTEXT_SIZE); + return container_of(tls_offload_ctx(tls_ctx), + struct mlx5e_tls_offload_context, + base); +} + +void mlx5e_tls_build_netdev(struct mlx5e_priv *priv); + +#else + +static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { } + +#endif + +#endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f1fe490ed794..0450c8c9b447 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -42,7 +42,9 @@ #include "en_rep.h" #include "en_accel/ipsec.h" #include "en_accel/ipsec_rxtx.h" +#include "en_accel/tls.h" #include "accel/ipsec.h" +#include "accel/tls.h" #include "vxlan.h" struct mlx5e_rq_param { @@ -4376,6 +4378,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) #endif mlx5e_ipsec_build_netdev(priv); + mlx5e_tls_build_netdev(priv); } static void mlx5e_create_q_counters(struct mlx5e_priv *priv) -- cgit v1.2.3 From bf23974104fa7aa2f3cbd2f2295f866a71875efd Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Mon, 30 Apr 2018 10:16:20 +0300 Subject: net/mlx5e: TLS, Add Innova TLS TX offload data path Implement the TLS tx offload data path according to the requirements of the TLS generic NIC offload infrastructure. 
Special metadata ethertype is used to pass information to the hardware. Signed-off-by: Ilya Lesokhin Signed-off-by: Boris Pismenny Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 15 ++ .../mellanox/mlx5/core/en_accel/en_accel.h | 72 ++++++ .../net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 2 + .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 272 +++++++++++++++++++++ .../mellanox/mlx5/core/en_accel/tls_rxtx.h | 50 ++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 10 + drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 9 + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 37 +-- 10 files changed, 455 insertions(+), 16 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 50872ed30c0b..ec785f589666 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -28,6 +28,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o -mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o +mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o CFLAGS_tracepoint.o := -I$(src) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 66ecb1c81002..ca5b8523bc95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -335,6 +335,7 @@ enum { MLX5E_SQ_STATE_RECOVERING, MLX5E_SQ_STATE_IPSEC, MLX5E_SQ_STATE_AM, + MLX5E_SQ_STATE_TLS, }; struct mlx5e_sq_wqe_info { @@ -830,6 +831,8 @@ void mlx5e_build_ptys2ethtool_map(void); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5e_tx_wqe *wqe, u16 pi); void mlx5e_completion_event(struct mlx5_core_cq *mcq); void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); @@ -945,6 +948,18 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); } +static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ + struct mlx5_wq_cyc *wq; + + wq = &sq->wq; + *pi = sq->pc & wq->sz_m1; + *wqe = mlx5_wq_cyc_get_wqe(wq, *pi); + memset(*wqe, 0, sizeof(**wqe)); +} + static inline struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h new file mode 100644 index 000000000000..68fcb40a2847 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __MLX5E_EN_ACCEL_H__ +#define __MLX5E_EN_ACCEL_H__ + +#ifdef CONFIG_MLX5_ACCEL + +#include +#include +#include "en_accel/ipsec_rxtx.h" +#include "en_accel/tls_rxtx.h" +#include "en.h" + +static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, + struct mlx5e_txqsq *sq, + struct net_device *dev, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ +#ifdef CONFIG_MLX5_EN_TLS + if (sq->state & BIT(MLX5E_SQ_STATE_TLS)) { + skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi); + if (unlikely(!skb)) + return NULL; + } +#endif + +#ifdef CONFIG_MLX5_EN_IPSEC + if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) { + skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb); + if (unlikely(!skb)) + return NULL; + } +#endif + + return skb; +} + +#endif /* CONFIG_MLX5_ACCEL */ + +#endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index 38d88108a55a..aa6981c98bdc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -169,5 +169,7 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) if (!mlx5_accel_is_tls_device(priv->mdev)) return; + netdev->features |= NETIF_F_HW_TLS_TX; + netdev->hw_features |= NETIF_F_HW_TLS_TX; netdev->tlsdev_ops = &mlx5e_tls_ops; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c new file mode 100644 index 000000000000..49e8d455ebc3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include "en_accel/tls.h" +#include "en_accel/tls_rxtx.h" + +#define SYNDROME_OFFLOAD_REQUIRED 32 +#define SYNDROME_SYNC 33 + +struct sync_info { + u64 rcd_sn; + s32 sync_len; + int nr_frags; + skb_frag_t frags[MAX_SKB_FRAGS]; +}; + +struct mlx5e_tls_metadata { + /* One byte of syndrome followed by 3 bytes of swid */ + __be32 syndrome_swid; + __be16 first_seq; + /* packet type ID field */ + __be16 ethertype; +} __packed; + +static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) +{ + struct mlx5e_tls_metadata *pet; + struct ethhdr *eth; + + if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata))) + return -ENOMEM; + + eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata)); + skb->mac_header -= sizeof(struct mlx5e_tls_metadata); + pet = (struct mlx5e_tls_metadata *)(eth + 1); + + memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata), + 2 * ETH_ALEN); + + eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); + pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; + + return 0; +} + +static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context, + u32 tcp_seq, struct sync_info *info) +{ + int remaining, i = 0, ret = -EINVAL; + struct tls_record_info *record; + unsigned long flags; + s32 sync_size; + + spin_lock_irqsave(&context->base.lock, flags); + record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn); + + if (unlikely(!record)) + goto out; + + sync_size = tcp_seq - tls_record_start_seq(record); + info->sync_len = sync_size; + if (unlikely(sync_size < 0)) { + if (tls_record_is_start_marker(record)) + goto done; + + goto out; + } + + remaining = sync_size; + while (remaining > 0) { + info->frags[i] = record->frags[i]; + __skb_frag_ref(&info->frags[i]); + remaining -= skb_frag_size(&info->frags[i]); + + if (remaining < 0) + skb_frag_size_add(&info->frags[i], remaining); + + i++; + } + info->nr_frags = i; +done: + ret = 0; +out: + spin_unlock_irqrestore(&context->base.lock, flags); + return ret; +} + +static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, + struct sk_buff *nskb, u32 tcp_seq, + int headln, __be64 rcd_sn) +{ + struct mlx5e_tls_metadata *pet; + u8 syndrome = SYNDROME_SYNC; + struct iphdr *iph; + struct tcphdr *th; + int data_len, mss; + + nskb->dev = skb->dev; + 
skb_reset_mac_header(nskb); + skb_set_network_header(nskb, skb_network_offset(skb)); + skb_set_transport_header(nskb, skb_transport_offset(skb)); + memcpy(nskb->data, skb->data, headln); + memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn)); + + iph = ip_hdr(nskb); + iph->tot_len = htons(nskb->len - skb_network_offset(nskb)); + th = tcp_hdr(nskb); + data_len = nskb->len - headln; + tcp_seq -= data_len; + th->seq = htonl(tcp_seq); + + mss = nskb->dev->mtu - (headln - skb_network_offset(nskb)); + skb_shinfo(nskb)->gso_size = 0; + if (data_len > mss) { + skb_shinfo(nskb)->gso_size = mss; + skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss); + } + skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; + + pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr)); + memcpy(pet, &syndrome, sizeof(syndrome)); + pet->first_seq = htons(tcp_seq); + + /* MLX5 devices don't care about the checksum partial start, offset + * and pseudo header + */ + nskb->ip_summed = CHECKSUM_PARTIAL; + + nskb->xmit_more = 1; + nskb->queue_mapping = skb->queue_mapping; +} + +static struct sk_buff * +mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, + struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ + u32 tcp_seq = ntohl(tcp_hdr(skb)->seq); + struct sync_info info; + struct sk_buff *nskb; + int linear_len = 0; + int headln; + int i; + + sq->stats.tls_ooo++; + + if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) + /* We might get here if a retransmission reaches the driver + * after the relevant record is acked. + * It should be safe to drop the packet in this case + */ + goto err_out; + + if (unlikely(info.sync_len < 0)) { + u32 payload; + + headln = skb_transport_offset(skb) + tcp_hdrlen(skb); + payload = skb->len - headln; + if (likely(payload <= -info.sync_len)) + /* SKB payload doesn't require offload + */ + return skb; + + netdev_err(skb->dev, + "Can't offload from the middle of an SKB [seq: %X, offload_seq: %X, end_seq: %X]\n", + tcp_seq, tcp_seq + payload + info.sync_len, + tcp_seq + payload); + goto err_out; + } + + if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) + goto err_out; + + headln = skb_transport_offset(skb) + tcp_hdrlen(skb); + linear_len += headln + sizeof(info.rcd_sn); + nskb = alloc_skb(linear_len, GFP_ATOMIC); + if (unlikely(!nskb)) + goto err_out; + + context->expected_seq = tcp_seq + skb->len - headln; + skb_put(nskb, linear_len); + for (i = 0; i < info.nr_frags; i++) + skb_shinfo(nskb)->frags[i] = info.frags[i]; + + skb_shinfo(nskb)->nr_frags = info.nr_frags; + nskb->data_len = info.sync_len; + nskb->len += info.sync_len; + sq->stats.tls_resync_bytes += nskb->len; + mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln, + cpu_to_be64(info.rcd_sn)); + mlx5e_sq_xmit(sq, nskb, *wqe, *pi); + mlx5e_sq_fetch_wqe(sq, wqe, pi); + return skb; + +err_out: + dev_kfree_skb_any(skb); + return NULL; +} + +struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ + struct mlx5e_tls_offload_context *context; + struct tls_context *tls_ctx; + u32 expected_seq; + int datalen; + u32 skb_seq; + + if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)) + goto out; + + datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (!datalen) + goto out; + + tls_ctx = tls_get_ctx(skb->sk); + if (unlikely(tls_ctx->netdev != netdev)) + goto out; + + skb_seq = ntohl(tcp_hdr(skb)->seq); + context = 
mlx5e_get_tls_tx_context(tls_ctx); + expected_seq = context->expected_seq; + + if (unlikely(expected_seq != skb_seq)) { + skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi); + goto out; + } + + if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { + dev_kfree_skb_any(skb); + skb = NULL; + goto out; + } + + context->expected_seq = skb_seq + datalen; +out: + return skb; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h new file mode 100644 index 000000000000..405dfd302225 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef __MLX5E_TLS_RXTX_H__ +#define __MLX5E_TLS_RXTX_H__ + +#ifdef CONFIG_MLX5_EN_TLS + +#include +#include "en.h" + +struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi); + +#endif /* CONFIG_MLX5_EN_TLS */ + +#endif /* __MLX5E_TLS_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0450c8c9b447..5f5f97ee64b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1016,6 +1016,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); + if (mlx5_accel_is_tls_device(c->priv->mdev)) + set_bit(MLX5E_SQ_STATE_TLS, &sq->state); param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index b08c94422907..c04cf2b4d838 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -43,6 +43,12 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, + +#ifdef CONFIG_MLX5_EN_TLS + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) }, +#endif + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, @@ -161,6 +167,10 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_csum_partial_inner += sq_stats->csum_partial_inner; s->tx_csum_none += sq_stats->csum_none; s->tx_csum_partial += sq_stats->csum_partial; +#ifdef CONFIG_MLX5_EN_TLS + s->tx_tls_ooo += sq_stats->tls_ooo; + s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; +#endif } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 53111a2df587..a36e6a87066b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -93,6 +93,11 @@ struct mlx5e_sw_stats { u64 rx_cache_waive; u64 ch_eq_rearm; +#ifdef CONFIG_MLX5_EN_TLS + u64 tx_tls_ooo; + u64 tx_tls_resync_bytes; +#endif + /* Special handling counters */ u64 link_down_events_phy; }; @@ -194,6 +199,10 @@ struct mlx5e_sq_stats { u64 csum_partial_inner; u64 added_vlan_packets; u64 nop; +#ifdef CONFIG_MLX5_EN_TLS + u64 tls_ooo; + u64 tls_resync_bytes; +#endif /* less likely accessed in data path */ u64 csum_none; u64 stopped; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 20297108528a..486b6bfba8b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -35,12 +35,21 @@ #include #include "en.h" #include "ipoib/ipoib.h" -#include "en_accel/ipsec_rxtx.h" +#include "en_accel/en_accel.h" #include "lib/clock.h" #define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS + +#ifndef 
CONFIG_MLX5_EN_TLS #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ MLX5E_SQ_NOPS_ROOM) +#else +/* TLS offload requires MLX5E_SQ_STOP_ROOM to have + * enough room for a resync SKB, a normal SKB and a NOP + */ +#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\ + MLX5E_SQ_NOPS_ROOM) +#endif static inline void mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) @@ -325,8 +334,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, } } -static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5e_tx_wqe *wqe, u16 pi) +netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5e_tx_wqe *wqe, u16 pi) { struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; @@ -399,21 +408,19 @@ dma_unmap_wqe_err: netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)]; - struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = sq->pc & wq->sz_m1; - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + struct mlx5e_tx_wqe *wqe; + struct mlx5e_txqsq *sq; + u16 pi; - memset(wqe, 0, sizeof(*wqe)); + sq = priv->txq2sq[skb_get_queue_mapping(skb)]; + mlx5e_sq_fetch_wqe(sq, &wqe, &pi); -#ifdef CONFIG_MLX5_EN_IPSEC - if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) { - skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb); - if (unlikely(!skb)) - return NETDEV_TX_OK; - } +#ifdef CONFIG_MLX5_ACCEL + /* might send skbs and update wqe and pi */ + skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi); + if (unlikely(!skb)) + return NETDEV_TX_OK; #endif - return mlx5e_sq_xmit(sq, skb, wqe, pi); } -- cgit v1.2.3 From 43585a41bd894925abe5015edbce475beb3d8c10 Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Mon, 30 Apr 2018 10:16:21 +0300 Subject: net/mlx5e: TLS, Add error statistics Add statistics for rare TLS related errors. Since the errors are rare we have a counter per netdev rather then per SQ. Signed-off-by: Ilya Lesokhin Signed-off-by: Boris Pismenny Acked-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 + .../net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 22 ++++++ .../net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 22 ++++++ .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 24 +++--- .../mellanox/mlx5/core/en_accel/tls_stats.c | 89 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 22 ++++++ 8 files changed, 178 insertions(+), 10 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index ec785f589666..a7135f5d5cf6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -28,6 +28,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o -mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o +mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o CFLAGS_tracepoint.o := -I$(src) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index ca5b8523bc95..9cc07da09b70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -801,6 +801,9 @@ struct mlx5e_priv { #ifdef CONFIG_MLX5_EN_IPSEC struct mlx5e_ipsec *ipsec; #endif +#ifdef CONFIG_MLX5_EN_TLS + struct mlx5e_tls *tls; +#endif }; struct mlx5e_profile { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index aa6981c98bdc..d167845271c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -173,3 +173,25 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) netdev->hw_features |= NETIF_F_HW_TLS_TX; netdev->tlsdev_ops = &mlx5e_tls_ops; } + +int mlx5e_tls_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tls *tls = kzalloc(sizeof(*tls), GFP_KERNEL); + + if (!tls) + return -ENOMEM; + + priv->tls = tls; + return 0; +} + +void mlx5e_tls_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_tls *tls = priv->tls; + + if (!tls) + return; + + kfree(tls); + priv->tls = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index f7216b9b98e2..b6162178f621 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -38,6 +38,17 @@ #include #include "en.h" +struct mlx5e_tls_sw_stats { + atomic64_t tx_tls_drop_metadata; + atomic64_t tx_tls_drop_resync_alloc; + atomic64_t tx_tls_drop_no_sync_data; + atomic64_t tx_tls_drop_bypass_required; +}; + +struct mlx5e_tls { + struct mlx5e_tls_sw_stats sw_stats; +}; + struct mlx5e_tls_offload_context { struct tls_offload_context base; u32 expected_seq; @@ -55,10 +66,21 @@ mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) } void mlx5e_tls_build_netdev(struct mlx5e_priv *priv); +int mlx5e_tls_init(struct mlx5e_priv *priv); +void mlx5e_tls_cleanup(struct mlx5e_priv *priv); + +int mlx5e_tls_get_count(struct mlx5e_priv *priv); +int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data); +int 
mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data); #else static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { } +static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { } +static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; } +static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; } +static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; } #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 49e8d455ebc3..ad2790fb5966 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -164,7 +164,8 @@ static struct sk_buff * mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe **wqe, - u16 *pi) + u16 *pi, + struct mlx5e_tls *tls) { u32 tcp_seq = ntohl(tcp_hdr(skb)->seq); struct sync_info info; @@ -175,12 +176,14 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, sq->stats.tls_ooo++; - if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) + if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) { /* We might get here if a retransmission reaches the driver * after the relevant record is acked. * It should be safe to drop the packet in this case */ + atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data); goto err_out; + } if (unlikely(info.sync_len < 0)) { u32 payload; @@ -192,21 +195,22 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, */ return skb; - netdev_err(skb->dev, - "Can't offload from the middle of an SKB [seq: %X, offload_seq: %X, end_seq: %X]\n", - tcp_seq, tcp_seq + payload + info.sync_len, - tcp_seq + payload); + atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required); goto err_out; } - if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) + if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { + atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata); goto err_out; + } headln = skb_transport_offset(skb) + tcp_hdrlen(skb); linear_len += headln + sizeof(info.rcd_sn); nskb = alloc_skb(linear_len, GFP_ATOMIC); - if (unlikely(!nskb)) + if (unlikely(!nskb)) { + atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc); goto err_out; + } context->expected_seq = tcp_seq + skb->len - headln; skb_put(nskb, linear_len); @@ -234,6 +238,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_tx_wqe **wqe, u16 *pi) { + struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_tls_offload_context *context; struct tls_context *tls_ctx; u32 expected_seq; @@ -256,11 +261,12 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, expected_seq = context->expected_seq; if (unlikely(expected_seq != skb_seq)) { - skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi); + skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls); goto out; } if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { + atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata); dev_kfree_skb_any(skb); skb = NULL; goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c new file mode 100644 index 000000000000..01468ec27446 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c @@ -0,0 +1,89 @@ 
+/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include + +#include "en.h" +#include "accel/tls.h" +#include "fpga/sdk.h" +#include "en_accel/tls.h" + +static const struct counter_desc mlx5e_tls_sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) }, +}; + +#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ + atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) + +#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc) + +int mlx5e_tls_get_count(struct mlx5e_priv *priv) +{ + if (!priv->tls) + return 0; + + return NUM_TLS_SW_COUNTERS; +} + +int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) +{ + unsigned int i, idx = 0; + + if (!priv->tls) + return 0; + + for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + mlx5e_tls_sw_stats_desc[i].format); + + return NUM_TLS_SW_COUNTERS; +} + +int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) +{ + int i, idx = 0; + + if (!priv->tls) + return 0; + + for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, + mlx5e_tls_sw_stats_desc, i); + + return NUM_TLS_SW_COUNTERS; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5f5f97ee64b2..553eb63753ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4422,12 +4422,16 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, err = mlx5e_ipsec_init(priv); if (err) mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); + err = mlx5e_tls_init(priv); + if (err) + mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); mlx5e_vxlan_init(priv); } static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { + mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); mlx5e_vxlan_cleanup(priv); } diff 
--git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index c04cf2b4d838..e17919c0af08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -32,6 +32,7 @@ #include "en.h" #include "en_accel/ipsec.h" +#include "en_accel/tls.h" static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, @@ -1075,6 +1076,22 @@ static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv) mlx5e_ipsec_update_stats(priv); } +static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv) +{ + return mlx5e_tls_get_count(priv); +} + +static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN); +} + +static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) +{ + return idx + mlx5e_tls_get_stats(priv, data + idx); +} + static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, @@ -1277,6 +1294,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_stats = mlx5e_grp_ipsec_fill_stats, .update_stats = mlx5e_grp_ipsec_update_stats, }, + { + .get_num_stats = mlx5e_grp_tls_get_num_stats, + .fill_strings = mlx5e_grp_tls_fill_strings, + .fill_stats = mlx5e_grp_tls_fill_stats, + }, { .get_num_stats = mlx5e_grp_channels_get_num_stats, .fill_strings = mlx5e_grp_channels_fill_strings, -- cgit v1.2.3 From efacb568c962470894dcda2bc51ea42af96a39eb Mon Sep 17 00:00:00 2001 From: Yixun Lan Date: Sat, 28 Apr 2018 10:21:11 +0000 Subject: net: stmmac: dwmac-meson: extend phy mode setting In the Meson-AXG SoC, the phy mode setting of PRG_ETH0 in the glue layer is extended from bit[0] to bit[2:0]. There is no problem if we configure it to the RGMII 1000M PHY mode, since the register setting is coincidentally compatible with previous one, but for the RMII 100M PHY mode, the configuration need to be changed to value - b100. This patch was verified with a RTL8201F 100M ethernet PHY. Signed-off-by: Yixun Lan Acked-by: Martin Blumenstingl Signed-off-by: David S. 
Miller --- .../net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 120 ++++++++++++++++++--- 1 file changed, 104 insertions(+), 16 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 7cb794094a70..4ff231df7322 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,10 @@ #define PRG_ETH0_RGMII_MODE BIT(0) +#define PRG_ETH0_EXT_PHY_MODE_MASK GENMASK(2, 0) +#define PRG_ETH0_EXT_RGMII_MODE 1 +#define PRG_ETH0_EXT_RMII_MODE 4 + /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ #define PRG_ETH0_CLK_M250_SEL_SHIFT 4 #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) @@ -47,12 +52,20 @@ #define MUX_CLK_NUM_PARENTS 2 +struct meson8b_dwmac; + +struct meson8b_dwmac_data { + int (*set_phy_mode)(struct meson8b_dwmac *dwmac); +}; + struct meson8b_dwmac { - struct device *dev; - void __iomem *regs; - phy_interface_t phy_mode; - struct clk *rgmii_tx_clk; - u32 tx_delay_ns; + struct device *dev; + void __iomem *regs; + + const struct meson8b_dwmac_data *data; + phy_interface_t phy_mode; + struct clk *rgmii_tx_clk; + u32 tx_delay_ns; }; struct meson8b_dwmac_clk_configs { @@ -171,6 +184,59 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) return 0; } +static int meson8b_set_phy_mode(struct meson8b_dwmac *dwmac) +{ + switch (dwmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + /* enable RGMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_RGMII_MODE, + PRG_ETH0_RGMII_MODE); + break; + case PHY_INTERFACE_MODE_RMII: + /* disable RGMII mode -> enables RMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_RGMII_MODE, 0); + break; + default: + dev_err(dwmac->dev, "fail to set phy-mode %s\n", + phy_modes(dwmac->phy_mode)); + return -EINVAL; + } + + return 0; +} + +static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac) +{ + switch (dwmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + /* enable RGMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_EXT_PHY_MODE_MASK, + PRG_ETH0_EXT_RGMII_MODE); + break; + case PHY_INTERFACE_MODE_RMII: + /* disable RGMII mode -> enables RMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_EXT_PHY_MODE_MASK, + PRG_ETH0_EXT_RMII_MODE); + break; + default: + dev_err(dwmac->dev, "fail to set phy-mode %s\n", + phy_modes(dwmac->phy_mode)); + return -EINVAL; + } + + return 0; +} + static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) { int ret; @@ -188,10 +254,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_TXID: - /* enable RGMII mode */ - meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, - PRG_ETH0_RGMII_MODE); - /* only relevant for RMII mode -> disable in RGMII mode */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, 0); @@ -224,10 +286,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) break; case PHY_INTERFACE_MODE_RMII: - /* disable RGMII mode -> enables RMII mode */ - meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, - 0); - /* invert internal clk_rmii_i 
to generate 25/2.5 tx_rx_clk */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, @@ -274,6 +332,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + dwmac->data = (const struct meson8b_dwmac_data *) + of_device_get_match_data(&pdev->dev); + if (!dwmac->data) + return -EINVAL; + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); dwmac->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dwmac->regs)) { @@ -298,6 +361,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) if (ret) goto err_remove_config_dt; + ret = dwmac->data->set_phy_mode(dwmac); + if (ret) + goto err_remove_config_dt; + ret = meson8b_init_prg_eth(dwmac); if (ret) goto err_remove_config_dt; @@ -316,10 +383,31 @@ err_remove_config_dt: return ret; } +static const struct meson8b_dwmac_data meson8b_dwmac_data = { + .set_phy_mode = meson8b_set_phy_mode, +}; + +static const struct meson8b_dwmac_data meson_axg_dwmac_data = { + .set_phy_mode = meson_axg_set_phy_mode, +}; + static const struct of_device_id meson8b_dwmac_match[] = { - { .compatible = "amlogic,meson8b-dwmac" }, - { .compatible = "amlogic,meson8m2-dwmac" }, - { .compatible = "amlogic,meson-gxbb-dwmac" }, + { + .compatible = "amlogic,meson8b-dwmac", + .data = &meson8b_dwmac_data, + }, + { + .compatible = "amlogic,meson8m2-dwmac", + .data = &meson8b_dwmac_data, + }, + { + .compatible = "amlogic,meson-gxbb-dwmac", + .data = &meson8b_dwmac_data, + }, + { + .compatible = "amlogic,meson-axg-dwmac", + .data = &meson_axg_dwmac_data, + }, { } }; MODULE_DEVICE_TABLE(of, meson8b_dwmac_match); -- cgit v1.2.3 From 206703289a5712a5645558ac7ae24bd29ca69178 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Tue, 1 May 2018 19:55:57 +0100 Subject: net: hns3: Remove error log when getting pfc stats fails When the MAC supports DCB but is in GE mode, it does not support querying pfc stats, and the firmware returns an error when they are queried. This creates a lot of noise in the kernel log when the error is printed. This patch fixes it by removing the error log; the error is already returned to user space, so the user is aware of it. Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 885f25cd7be4..c69ecab460f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -134,11 +134,8 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev, } ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get pfc pause stats fail, ret = %d.\n", ret); + if (ret) return ret; - } for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { struct hclge_pfc_stats_cmd *pfc_stats = -- cgit v1.2.3 From 35f58fd792d7fb15f5c036c7b191b0f44d644d75 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Tue, 1 May 2018 19:55:58 +0100 Subject: net: hns3: fix to correctly fetch l4 protocol outer header This patch fixes the function used to fetch the outer L4 protocol header; the skb_inner_transport_header API was mistakenly being used earlier. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 8c55965a66ac..c4e295094da7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -502,7 +502,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, /* find outer header point */ l3.hdr = skb_network_header(skb); - l4_hdr = skb_inner_transport_header(skb); + l4_hdr = skb_transport_header(skb); if (skb->protocol == htons(ETH_P_IPV6)) { exthdr = l3.hdr + sizeof(*l3.v6); -- cgit v1.2.3
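For context, a minimal sketch of why the helper choice matters (illustration only, not part of the patch; the function name below is invented for the example): on encapsulated traffic the outer and inner transport offsets differ, so the outer L4 protocol has to be located via skb_transport_header(), while skb_inner_transport_header() points into the tunnel payload.

#include <linux/skbuff.h>

/* Illustration only: outer vs. inner transport header pointers of an skb. */
static void example_outer_header_pointers(struct sk_buff *skb)
{
	unsigned char *outer_l3 = skb_network_header(skb);         /* outer L3 */
	unsigned char *outer_l4 = skb_transport_header(skb);       /* outer L4 */
	unsigned char *inner_l4 = skb_inner_transport_header(skb); /* L4 inside the tunnel */

	/* On tunnelled packets (e.g. VXLAN) outer_l4 and inner_l4 differ;
	 * reading the outer protocol therefore has to start from outer_l4,
	 * which is what the hunk above switches to.
	 */
	(void)outer_l3;
	(void)outer_l4;
	(void)inner_l4;
}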
From 38e62046d4c95272e2fb001d2d72baf48fa090e9 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Tue, 1 May 2018 19:55:59 +0100 Subject: net: hns3: Fixes the out of bounds access in hclge_map_tqp This patch fixes the handling of the check when the number of vports is detected to be more than the available TQPs. The current handling causes an out of bounds access in hclge_map_tqp(). Fixes: 7df7dad633e2 ("net: hns3: Refactor the mapping of tqp to vport") Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2066dd734444..c9e80ca6d98a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1459,8 +1459,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) /* We need to alloc a vport for main NIC of PF */ num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; - if (hdev->num_tqps < num_vport) - num_vport = hdev->num_tqps; + if (hdev->num_tqps < num_vport) { + dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", + hdev->num_tqps, num_vport); + return -EINVAL; + } /* Alloc the same number of TQPs for every vport */ tqp_per_vport = hdev->num_tqps / num_vport; -- cgit v1.2.3 From ffd5656e182b94148998b48f748112450c9ff2b0 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Tue, 1 May 2018 19:56:00 +0100 Subject: net: hns3: Fixes the error legs in hclge_init_ae_dev function This patch fixes some of the missed error legs in the initialization function of the ae device; the missing unwinding might cause resource leaks in case of failure. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S.
Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 55 +++++++++++++--------- 1 file changed, 34 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c9e80ca6d98a..b5e0c58fe0c2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5430,7 +5430,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); if (!hdev) { ret = -ENOMEM; - goto err_hclge_dev; + goto out; } hdev->pdev = pdev; @@ -5443,38 +5443,38 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_pci_init(hdev); if (ret) { dev_err(&pdev->dev, "PCI init failed\n"); - goto err_pci_init; + goto out; } /* Firmware command queue initialize */ ret = hclge_cmd_queue_init(hdev); if (ret) { dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); - return ret; + goto err_pci_uninit; } /* Firmware command initialize */ ret = hclge_cmd_init(hdev); if (ret) - goto err_cmd_init; + goto err_cmd_uninit; ret = hclge_get_cap(hdev); if (ret) { dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", ret); - return ret; + goto err_cmd_uninit; } ret = hclge_configure(hdev); if (ret) { dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); - return ret; + goto err_cmd_uninit; } ret = hclge_init_msi(hdev); if (ret) { dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); - return ret; + goto err_cmd_uninit; } ret = hclge_misc_irq_init(hdev); @@ -5482,69 +5482,69 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) dev_err(&pdev->dev, "Misc IRQ(vector0) init error, ret = %d.\n", ret); - return ret; + goto err_msi_uninit; } ret = hclge_alloc_tqps(hdev); if (ret) { dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); - return ret; + goto err_msi_irq_uninit; } ret = hclge_alloc_vport(hdev); if (ret) { dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); - return ret; + goto err_msi_irq_uninit; } ret = hclge_map_tqp(hdev); if (ret) { dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); - return ret; + goto err_sriov_disable; } ret = hclge_mac_mdio_config(hdev); if (ret) { dev_warn(&hdev->pdev->dev, "mdio config fail ret=%d\n", ret); - return ret; + goto err_sriov_disable; } ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); - return ret; + goto err_mdiobus_unreg; } ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); if (ret) { dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } ret = hclge_tm_schd_init(hdev); if (ret) { dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } hclge_rss_init_cfg(hdev); ret = hclge_rss_init_hw(hdev); if (ret) { dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } ret = init_mgr_tbl(hdev); if (ret) { dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } hclge_dcb_ops_set(hdev); @@ -5567,11 +5567,24 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; -err_cmd_init: +err_mdiobus_unreg: + if 
(hdev->hw.mac.phydev) + mdiobus_unregister(hdev->hw.mac.mdio_bus); +err_sriov_disable: + if (IS_ENABLED(CONFIG_PCI_IOV)) + hclge_disable_sriov(hdev); +err_msi_irq_uninit: + hclge_misc_irq_uninit(hdev); +err_msi_uninit: + pci_free_irq_vectors(pdev); +err_cmd_uninit: + hclge_destroy_cmd_queue(&hdev->hw); +err_pci_uninit: + pci_clear_master(pdev); pci_release_regions(pdev); -err_pci_init: + pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); -err_hclge_dev: +out: return ret; } -- cgit v1.2.3 From c5ef83cbb1e9fa17a3b78fb2deb25ae8beebbf48 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Tue, 1 May 2018 19:56:01 +0100 Subject: net: hns3: fix for phy_addr error in hclge_mac_mdio_config When a phy exists, phy_addr must be less than PHY_MAX_ADDR; if it is not, hclge_mac_mdio_config should return an error. For fiber (phy_addr=0xff), hclge_mac_mdio_config is not needed at all. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 12 +++++++----- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 7 +++++-- 2 files changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index b5e0c58fe0c2..cc09713b89ad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5503,11 +5503,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_sriov_disable; } - ret = hclge_mac_mdio_config(hdev); - if (ret) { - dev_warn(&hdev->pdev->dev, - "mdio config fail ret=%d\n", ret); - goto err_sriov_disable; + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { + ret = hclge_mac_mdio_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio config fail ret=%d\n", ret); + goto err_sriov_disable; + } } ret = hclge_mac_init(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 682c2d6618e7..9f7932e423b5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -140,8 +140,11 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev) struct mii_bus *mdio_bus; int ret; - if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) - return 0; + if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { + dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n", + hdev->hw.mac.phy_addr); + return -EINVAL; + } mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev); if (!mdio_bus) -- cgit v1.2.3
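A minimal sketch of the resulting probe-time decision (illustration only; the helper name is invented, HNAE3_MEDIA_TYPE_COPPER comes from the driver's hnae3.h and PHY_MAX_ADDR, which is 32, from include/linux/phy.h):

#include <linux/phy.h>

/* Illustration only: MDIO setup is gated on the media type, and an
 * out-of-range phy_addr such as the 0xff used for fiber ports is rejected.
 */
static int example_mdio_needed(u8 media_type, u8 phy_addr)
{
	if (media_type != HNAE3_MEDIA_TYPE_COPPER)
		return 0;		/* no PHY on the MDIO bus, skip config */
	if (phy_addr >= PHY_MAX_ADDR)
		return -EINVAL;		/* bogus address reported by firmware */
	return 1;			/* proceed with hclge_mac_mdio_config() */
}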
From 0c963e8c20ce368ddedc22cbfa710315d715dfae Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Tue, 1 May 2018 19:56:02 +0100 Subject: net: hns3: Fix to support autoneg only for port attached with phy This patch adds a check to support autoneg (ethtool -A) only when a PHY is attached to the port. Fixes: e2cb1dec9779 ("net: hns3: Add HNS3 VF HCL(Hardware Compatibility Layer) Support") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index cc09713b89ad..a4e999180117 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5169,12 +5169,6 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, struct phy_device *phydev = hdev->hw.mac.phydev; u32 fc_autoneg; - /* Only support flow control negotiation for netdev with - * phy attached for now. - */ - if (!phydev) - return -EOPNOTSUPP; - fc_autoneg = hclge_get_autoneg(handle); if (auto_neg != fc_autoneg) { dev_info(&hdev->pdev->dev, @@ -5193,6 +5187,12 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, if (!fc_autoneg) return hclge_cfg_pauseparam(hdev, rx_en, tx_en); + /* Only support flow control negotiation for netdev with + * phy attached for now. + */ + if (!phydev) + return -EOPNOTSUPP; + return phy_start_aneg(phydev); } -- cgit v1.2.3 From 3ff504908f95093cc013a01d1cafb1f7fda0ab34 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Tue, 1 May 2018 19:56:03 +0100 Subject: net: hns3: fix a dead loop in hclge_cmd_csq_clean If head has an invalid value, a dead loop can be triggered in hclge_cmd_csq_clean. This patch adds a sanity check for this case. Fixes: 68c0a5c70614 ("net: hns3: Add HNS3 IMP(Integrated Mgmt Proc) Cmd Interface Support") Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index ff13d1876d9e..fab70683bbf7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -31,6 +31,17 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring) return ring->desc_num - used - 1; } +static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h) +{ + int u = ring->next_to_use; + int c = ring->next_to_clean; + + if (unlikely(h >= ring->desc_num)) + return 0; + + return u > c ? (h > c && h <= u) : (h > c || h <= u); } + static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclge_desc); @@ -141,6 +152,7 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw) static int hclge_cmd_csq_clean(struct hclge_hw *hw) { + struct hclge_dev *hdev = (struct hclge_dev *)hw->back; struct hclge_cmq_ring *csq = &hw->cmq.csq; u16 ntc = csq->next_to_clean; struct hclge_desc *desc; @@ -149,6 +161,13 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw) desc = &csq->desc[ntc]; head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + rmb(); /* Make sure head is ready before touch any data */ + + if (!is_valid_csq_clean_head(csq, head)) { + dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); + return 0; + } while (head != ntc) { memset(desc, 0, sizeof(*desc)); -- cgit v1.2.3
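As a worked illustration of the new check (a standalone restatement for the example, not driver code): the CSQ is a ring of desc_num descriptors, so a head value reported by the firmware is only plausible if it lies between next_to_clean and next_to_use in ring order; anything else, including head >= desc_num, must not be used as the loop bound.

#include <linux/types.h>

/* Illustration only: plausibility of a firmware-reported ring head.
 * With desc_num = 1024, next_to_clean = 1000 and next_to_use = 10 (the
 * ring has wrapped), only 1001..1023 and 0..10 are acceptable values.
 */
static bool example_head_is_plausible(int head, int clean, int use, int desc_num)
{
	if (head >= desc_num)
		return false;
	return use > clean ? (head > clean && head <= use)
			   : (head > clean || head <= use);
}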
From dc8131d846d45dabc39dadac32407b321960791f Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Tue, 1 May 2018 19:56:04 +0100 Subject: net: hns3: Fix for packet loss due wrong filter config in VLAN tbls There are two levels of vlan tables in hardware: the port vlan table, which is shared by all functions, and the function vlan table, of which each function has its own. Currently, the PF sets the port vlan table and the VF sets the function vlan table, which causes a packet loss problem. This patch fixes the problem by setting both vlan tables, and uses hdev->vlan_table to manage the port vlan table. Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 79 +++++++++++++++++----- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 8 ++- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7 +- 3 files changed, 70 insertions(+), 24 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a4e999180117..77d9e4c02534 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4543,8 +4543,9 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); } -int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, - bool is_kill, u16 vlan, u8 qos, __be16 proto) +static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, + bool is_kill, u16 vlan, u8 qos, + __be16 proto) { #define HCLGE_MAX_VF_BYTES 16 struct hclge_vlan_filter_vf_cfg_cmd *req0; @@ -4602,12 +4603,9 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, return -EIO; } -static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, - __be16 proto, u16 vlan_id, - bool is_kill) +static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, + u16 vlan_id, bool is_kill) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; struct hclge_vlan_filter_pf_cfg_cmd *req; struct hclge_desc desc; u8 vlan_offset_byte_val; @@ -4627,22 +4625,66 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "port vlan command, send fail, ret =%d.\n", ret); + return ret; +} + +static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, + u16
vport_id, u16 vlan_id, u8 qos, + bool is_kill) +{ + u16 vport_idx, vport_num = 0; + int ret; + + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, + 0, proto); if (ret) { dev_err(&hdev->pdev->dev, - "port vlan command, send fail, ret =%d.\n", - ret); + "Set %d vport vlan filter config fail, ret =%d.\n", + vport_id, ret); return ret; } - ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto); - if (ret) { + /* vlan 0 may be added twice when 8021q module is enabled */ + if (!is_kill && !vlan_id && + test_bit(vport_id, hdev->vlan_table[vlan_id])) + return 0; + + if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { dev_err(&hdev->pdev->dev, - "Set pf vlan filter config fail, ret =%d.\n", - ret); - return -EIO; + "Add port vlan failed, vport %d is already in vlan %d\n", + vport_id, vlan_id); + return -EINVAL; } - return 0; + if (is_kill && + !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_err(&hdev->pdev->dev, + "Delete port vlan failed, vport %d is not in vlan %d\n", + vport_id, vlan_id); + return -EINVAL; + } + + for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) + vport_num++; + + if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) + ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, + is_kill); + + return ret; +} + +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, + 0, is_kill); } static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, @@ -4656,7 +4698,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; - return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); + return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); } static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) @@ -4821,7 +4863,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) } handle = &hdev->vport[0].nic; - return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); + return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) @@ -5604,6 +5646,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_DOWN, &hdev->state); hclge_stats_clear(hdev); + memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); ret = hclge_cmd_init(hdev); if (ret) { @@ -6221,7 +6264,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_fw_version = hclge_get_fw_version, .get_mdix_mode = hclge_get_mdix_mode, .enable_vlan_filter = hclge_enable_vlan_filter, - .set_vlan_filter = hclge_set_port_vlan_filter, + .set_vlan_filter = hclge_set_vlan_filter, .set_vf_vlan_filter = hclge_set_vf_vlan_filter, .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, .reset_event = hclge_reset_event, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 0f4157e71282..6432f754c1c6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -12,6 +12,8 @@ #include #include #include +#include + #include "hclge_cmd.h" #include "hnae3.h" @@ -471,6 +473,7 @@ struct hclge_vlan_type_cfg { u16 tx_in_vlan_type; }; +#define 
HCLGE_VPORT_NUM 256 struct hclge_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; @@ -562,6 +565,7 @@ struct hclge_dev { u64 rx_pkts_for_led; u64 tx_pkts_for_led; + unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; }; /* VPort level vlan tag configuration for TX direction */ @@ -646,8 +650,8 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue) } int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); -int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, - bool is_kill, u16 vlan, u8 qos, __be16 proto); +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill); int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index a6f7ffa9c259..7563335b0c7f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -264,19 +264,18 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) { - struct hclge_dev *hdev = vport->back; int status = 0; if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) { + struct hnae3_handle *handle = &vport->nic; u16 vlan, proto; bool is_kill; is_kill = !!mbx_req->msg[2]; memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan)); memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); - status = hclge_set_vf_vlan_common(hdev, vport->vport_id, - is_kill, vlan, 0, - cpu_to_be16(proto)); + status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), + vlan, is_kill); } if (gen_resp) -- cgit v1.2.3 From dbecc7796cf1ade372e1dec006031feb56fff140 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Tue, 1 May 2018 19:56:05 +0100 Subject: net: hns3: Remove packet statistics in the range of 8192~12287 Because the current statistics for size 8192~12287 are only valid for GE, while the ranges of 8192~9216 and 9217~12287 are valid only for LGE/CGE and are always 0 for GE interfaces, it is easy to cause confusion when viewing the packet statistics using the command ethtool -S. This patch removes the 8192~12287 range of packet statistics and uses the 8192~9216 and 9217~12287 ranges for statistics. This change depends on a firmware upgrade. Signed-off-by: Xi Wang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 4 ---- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 12 ++++++------ 2 files changed, 6 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 77d9e4c02534..dd5d65c9cca6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -304,8 +304,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, - {"mac_tx_8192_12287_oct_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)}, {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, {"mac_tx_9217_12287_oct_pkt_num", @@ -356,8 +354,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, - {"mac_rx_8192_12287_oct_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)}, {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, {"mac_rx_9217_12287_oct_pkt_num", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 6432f754c1c6..b7ee91daea0c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -408,9 +408,9 @@ struct hclge_mac_stats { u64 mac_tx_1519_2047_oct_pkt_num; u64 mac_tx_2048_4095_oct_pkt_num; u64 mac_tx_4096_8191_oct_pkt_num; - u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */ - u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ - u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */ + u64 rsv0; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; u64 mac_tx_12288_16383_oct_pkt_num; u64 mac_tx_1519_max_good_oct_pkt_num; u64 mac_tx_1519_max_bad_oct_pkt_num; @@ -435,9 +435,9 @@ struct hclge_mac_stats { u64 mac_rx_1519_2047_oct_pkt_num; u64 mac_rx_2048_4095_oct_pkt_num; u64 mac_rx_4096_8191_oct_pkt_num; - u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */ - u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ - u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */ + u64 rsv1; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; u64 mac_rx_12288_16383_oct_pkt_num; u64 mac_rx_1519_max_good_oct_pkt_num; u64 mac_rx_1519_max_bad_oct_pkt_num; -- cgit v1.2.3 From 795d8098d32b6bef3d0821588cb6e4b1f369a7a4 Mon Sep 17 00:00:00 2001 From: Raghu Vatsavayi Date: Tue, 1 May 2018 10:32:10 -0700 Subject: liquidio VF: indicate that disabling rx vlan offload is not allowed NIC firmware does not support disabling rx vlan offload, but the VF driver incorrectly indicates that it is supported. The PF driver already does the correct indication by clearing the NETIF_F_HW_VLAN_CTAG_RX bit in its netdev->hw_features. So just do the same thing in the VF. Signed-off-by: Raghu Vatsavayi Acked-by: Prasad Kanneganti Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 08b682b1c8ad..6295eeec795c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -2100,6 +2100,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->features = (lio->dev_capability & ~NETIF_F_LRO); netdev->hw_features = lio->dev_capability; + netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; /* MTU range: 68 - 16000 */ netdev->min_mtu = LIO_MIN_MTU_SIZE; -- cgit v1.2.3 From 8ac60ffb9a83c8f0f6b3bab5c205d75a32914904 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 1 May 2018 14:01:30 -0700 Subject: net: stmmac: Avoid VLA usage In the quest to remove all stack VLAs from the kernel[1], this switches the "status" stack buffer to use the existing small (8) upper bound on how many queues can be checked for DMA, and adds a sanity-check just to make sure it doesn't operate under pathological conditions. [1] http://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com Signed-off-by: Kees Cook Reviewed-by: Jose Abreu Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0135fd3aa6ef..b935478df55f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2022,7 +2022,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) tx_channel_count : rx_channel_count; u32 chan; bool poll_scheduled = false; - int status[channels_to_check]; + int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; + + /* Make sure we never check beyond our status buffer. */ + if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) + channels_to_check = ARRAY_SIZE(status); /* Each DMA channel can be used for rx and tx simultaneously, yet * napi_struct is embedded in struct stmmac_rx_queue rather than in a -- cgit v1.2.3 From 794451c1b57fa3871f51b62c0e430f44073e7cfc Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 2 May 2018 11:47:15 +0530 Subject: cxgb4: add new T5 device id's Add device id's 0x5019, 0x501a and 0x501b for T5 cards. Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 51b18035d691..90b5274a8ba1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -145,6 +145,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */ + CH_PCI_ID_TABLE_FENTRY(0x5019), /* T540-LP-BT */ + CH_PCI_ID_TABLE_FENTRY(0x501a), /* T540-SO-BT */ + CH_PCI_ID_TABLE_FENTRY(0x501b), /* T540-SO-CR */ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ -- cgit v1.2.3 From 6290182b2b3bda119ecb243fb3d6bcc98a74344f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 2 May 2018 10:17:34 +0300 Subject: mlxsw: spectrum_router: Return an error for non-default FIB rules Since commit 9776d32537d2 ("net: Move call_fib_rule_notifiers up in fib_nl_newrule") it is possible to forbid the installation of unsupported FIB rules. Have mlxsw return an error for non-default FIB rules in addition to the existing extack message. Example: # ip rule add from 198.51.100.1 table 10 Error: mlxsw_spectrum: FIB rules not supported. Note that offload is only aborted when non-default FIB rules are already installed and merely replayed during module initialization. Signed-off-by: Ido Schimmel Acked-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 8e4edb634b11..added380e344 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5882,24 +5882,24 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event, switch (info->family) { case AF_INET: if (!fib4_rule_default(rule) && !rule->l3mdev) - err = -1; + err = -EOPNOTSUPP; break; case AF_INET6: if (!fib6_rule_default(rule) && !rule->l3mdev) - err = -1; + err = -EOPNOTSUPP; break; case RTNL_FAMILY_IPMR: if (!ipmr_rule_default(rule) && !rule->l3mdev) - err = -1; + err = -EOPNOTSUPP; break; case RTNL_FAMILY_IP6MR: if (!ip6mr_rule_default(rule) && !rule->l3mdev) - err = -1; + err = -EOPNOTSUPP; break; } if (err < 0) - NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload"); + NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported"); return err; } @@ -5926,8 +5926,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, case FIB_EVENT_RULE_DEL: err = mlxsw_sp_router_fib_rule_event(event, info, router->mlxsw_sp); - if (!err) - return NOTIFY_DONE; + if (!err || info->extack) + return notifier_from_errno(err); } fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); -- cgit v1.2.3 From 50d10711cf1b0bf82f274b0901f54e7ff030b740 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 2 May 2018 10:17:35 +0300 Subject: mlxsw: spectrum_router: Return an error for routes added after abort We currently do not perform accounting in the driver and thus can't reject routes before resources are exceeded. 
However, in order to make users aware of the fact that routes are no longer offloaded we can return an error for routes configured after the abort mechanism was triggered. Signed-off-by: Ido Schimmel Acked-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index added380e344..8028d221aece 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5928,6 +5928,13 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, router->mlxsw_sp); if (!err || info->extack) return notifier_from_errno(err); + break; + case FIB_EVENT_ENTRY_ADD: + if (router->aborted) { + NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route"); + return notifier_from_errno(-EINVAL); + } + break; } fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); -- cgit v1.2.3 From 6851d025e514089bf8d2ea80048d0c928f4bbf02 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:39:35 +0200 Subject: r8169: remove unneeded check in r8168_pll_power_down RTL_GIGA_MAC_VER_23/24 are configured by rtl_hw_start_8168cp_2() and rtl_hw_start_8168cp_3() respectively which both apply CPCMD_QUIRK_MASK, thus clearing bit ASF. Bit ASF isn't set at any other place in the driver, therefore this check can be removed. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 66f10d119b12..ce7843aab81d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4838,12 +4838,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) if (r8168_check_dash(tp)) return; - if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || - tp->mac_version == RTL_GIGA_MAC_VER_24) && - (tp->cp_cmd & ASF)) { - return; - } - if (tp->mac_version == RTL_GIGA_MAC_VER_32 || tp->mac_version == RTL_GIGA_MAC_VER_33) rtl_ephy_write(tp, 0x19, 0xff64); -- cgit v1.2.3 From 40242e232e153dc4979721a89c286be613f2ae87 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:39:40 +0200 Subject: r8169: remove 810x_phy_power_up/down The functionality of 810x_phy_power_up/down is covered by the default clause in 8168_phy_power_up/down. Therefore we don't need these functions. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 98 ++++++++++++++++-------------------- 1 file changed, 43 insertions(+), 55 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ce7843aab81d..58559a004850 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4718,61 +4718,6 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) return true; } -static void r810x_phy_power_down(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); -} - -static void r810x_phy_power_up(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); -} - -static void r810x_pll_power_down(struct rtl8169_private *tp) -{ - if (rtl_wol_pll_power_down(tp)) - return; - - r810x_phy_power_down(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_16: - break; - default: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); - break; - } -} - -static void r810x_pll_power_up(struct rtl8169_private *tp) -{ - r810x_phy_power_up(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_16: - break; - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); - break; - default: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); - break; - } -} - static void r8168_phy_power_up(struct rtl8169_private *tp) { rtl_writephy(tp, 0x1f, 0x0000); @@ -4833,6 +4778,49 @@ static void r8168_phy_power_down(struct rtl8169_private *tp) } } +static void r810x_pll_power_down(struct rtl8169_private *tp) +{ + if (rtl_wol_pll_power_down(tp)) + return; + + r8168_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + default: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); + break; + } +} + +static void r810x_pll_power_up(struct rtl8169_private *tp) +{ + r8168_phy_power_up(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); + break; + default: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); + break; + } +} + static void r8168_pll_power_down(struct rtl8169_private *tp) { if (r8168_check_dash(tp)) -- cgit v1.2.3 From 73570bf19faae9da0941ba4a10989dfe085119ec Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:39:45 +0200 Subject: r8169: merge r810x_pll_power_down/up into r8168_pll_power_down/up r810x_pll_power_down/up and r8168_pll_power_down/up have a lot in common, so we can simplify the code by merging the former into the latter. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 61 ++++++++++-------------------------- 1 file changed, 16 insertions(+), 45 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 58559a004850..0585a9c6edcc 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4778,49 +4778,6 @@ static void r8168_phy_power_down(struct rtl8169_private *tp) } } -static void r810x_pll_power_down(struct rtl8169_private *tp) -{ - if (rtl_wol_pll_power_down(tp)) - return; - - r8168_phy_power_down(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_16: - break; - default: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); - break; - } -} - -static void r810x_pll_power_up(struct rtl8169_private *tp) -{ - r8168_phy_power_up(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_16: - break; - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); - break; - default: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); - break; - } -} - static void r8168_pll_power_down(struct rtl8169_private *tp) { if (r8168_check_dash(tp)) @@ -4840,12 +4797,19 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: case RTL_GIGA_MAC_VER_31: case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: case RTL_GIGA_MAC_VER_45: case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); @@ -4867,14 +4831,21 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: case RTL_GIGA_MAC_VER_31: case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); break; case RTL_GIGA_MAC_VER_44: case RTL_GIGA_MAC_VER_45: case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); @@ -4925,8 +4896,8 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - ops->down = r810x_pll_power_down; - ops->up = r810x_pll_power_up; + ops->down = r8168_pll_power_down; + ops->up = r8168_pll_power_up; break; case RTL_GIGA_MAC_VER_11: -- cgit v1.2.3 From 4f447d296982e5776c4fbaf1cc56c5402b86ccb7 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:39:47 +0200 Subject: r8169: drop member pll_power_ops from struct rtl8169_private After merging r810x_pll_power_down/up and r8168_pll_power_down/up we don't need member pll_power_ops any longer and can drop it, thus simplifying the code. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 76 +++++------------------------------- 1 file changed, 10 insertions(+), 66 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0585a9c6edcc..3573d8256ff9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -776,11 +776,6 @@ struct rtl8169_private { int (*read)(struct rtl8169_private *, int); } mdio_ops; - struct pll_power_ops { - void (*down)(struct rtl8169_private *); - void (*up)(struct rtl8169_private *); - } pll_power_ops; - struct jumbo_ops { void (*enable)(struct rtl8169_private *); void (*disable)(struct rtl8169_private *); @@ -4871,73 +4866,23 @@ static void rtl_generic_op(struct rtl8169_private *tp, static void rtl_pll_power_down(struct rtl8169_private *tp) { - rtl_generic_op(tp, tp->pll_power_ops.down); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15: + break; + default: + r8168_pll_power_down(tp); + } } static void rtl_pll_power_up(struct rtl8169_private *tp) { - rtl_generic_op(tp, tp->pll_power_ops.up); -} - -static void rtl_init_pll_power_ops(struct rtl8169_private *tp) -{ - struct pll_power_ops *ops = &tp->pll_power_ops; - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_16: - case RTL_GIGA_MAC_VER_29: - case RTL_GIGA_MAC_VER_30: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - ops->down = r8168_pll_power_down; - ops->up = r8168_pll_power_up; + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15: break; - - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_18: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - ops->down = r8168_pll_power_down; - ops->up = r8168_pll_power_up; - break; - default: - ops->down = NULL; - ops->up = NULL; - break; + r8168_pll_power_up(tp); } } @@ -8027,7 +7972,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); rtl_init_mdio_ops(tp); - rtl_init_pll_power_ops(tp); rtl_init_jumbo_ops(tp); rtl_init_csi_ops(tp); -- cgit v1.2.3 From 2a71883c4fd73a5d235363e6f8db118532d2d001 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:39:49 +0200 Subject: r8169: simplify code by using ranges in switch clauses Several switch statements can be significantly simplified by using case ranges. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 193 ++++------------------------------- 1 file changed, 19 insertions(+), 174 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3573d8256ff9..9c92b018b1a3 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1611,23 +1611,8 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) if (options & LinkUp) wolopts |= WAKE_PHY; switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) wolopts |= WAKE_MAGIC; break; @@ -1688,23 +1673,8 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) RTL_W8(tp, Cfg9346, Cfg9346_Unlock); switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: tmp = ARRAY_SIZE(cfg) - 1; if (wolopts & WAKE_MAGIC) rtl_w0w1_eri(tp, @@ -4623,18 +4593,7 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) ops->write = r8168dp_2_mdio_write; ops->read = r8168dp_2_mdio_read; break; - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: ops->write = r8168g_mdio_write; ops->read = r8168g_mdio_read; break; @@ -4679,21 +4638,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_39: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_37 ... 
RTL_GIGA_MAC_VER_51: RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -4719,18 +4664,7 @@ static void r8168_phy_power_up(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_11: case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_18: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: rtl_writephy(tp, 0x0e, 0x0000); break; @@ -4753,18 +4687,7 @@ static void r8168_phy_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_11: case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_18: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: rtl_writephy(tp, 0x0e, 0x0200); default: @@ -4788,15 +4711,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) r8168_phy_power_down(tp); switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_29: - case RTL_GIGA_MAC_VER_30: - case RTL_GIGA_MAC_VER_31: - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_37: case RTL_GIGA_MAC_VER_39: case RTL_GIGA_MAC_VER_43: @@ -4822,15 +4737,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) static void r8168_pll_power_up(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_29: - case RTL_GIGA_MAC_VER_30: - case RTL_GIGA_MAC_VER_31: - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_37: case RTL_GIGA_MAC_VER_39: case RTL_GIGA_MAC_VER_43: @@ -4889,45 +4796,16 @@ static void rtl_pll_power_up(struct rtl8169_private *tp) static void rtl_init_rxcfg(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01: - case RTL_GIGA_MAC_VER_02: - case RTL_GIGA_MAC_VER_03: - case RTL_GIGA_MAC_VER_04: - case RTL_GIGA_MAC_VER_05: - case RTL_GIGA_MAC_VER_06: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_14: - case RTL_GIGA_MAC_VER_15: - case RTL_GIGA_MAC_VER_16: - case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); break; - case RTL_GIGA_MAC_VER_18: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_18 ... 
RTL_GIGA_MAC_VER_24: case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_35: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: @@ -5064,18 +4942,7 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp) * No action needed for jumbo frames with 8169. * No jumbo for 810x at all. */ - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: default: ops->disable = NULL; ops->enable = NULL; @@ -5438,20 +5305,8 @@ static void rtl_init_csi_ops(struct rtl8169_private *tp) struct csi_ops *ops = &tp->csi_ops; switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01: - case RTL_GIGA_MAC_VER_02: - case RTL_GIGA_MAC_VER_03: - case RTL_GIGA_MAC_VER_04: - case RTL_GIGA_MAC_VER_05: - case RTL_GIGA_MAC_VER_06: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_14: - case RTL_GIGA_MAC_VER_15: - case RTL_GIGA_MAC_VER_16: - case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: ops->write = NULL; ops->read = NULL; break; @@ -7843,20 +7698,10 @@ static void rtl_hw_init_8168ep(struct rtl8169_private *tp) static void rtl_hw_initialize(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51: rtl_hw_init_8168ep(tp); break; default: -- cgit v1.2.3 From b2d43e6ecf6630e39f35573dce0ff9ae5af9e1a8 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:39:52 +0200 Subject: r8169: replace longer if statements with switch statements Some longer if statements can be simplified by using switch statements instead. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 54 +++++++++++------------------------- 1 file changed, 16 insertions(+), 38 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 9c92b018b1a3..41f9b5c2bc0d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5028,32 +5028,21 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) rtl_rx_close(tp); - if (tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) { + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); - } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || - tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_36 || - tp->mac_version == RTL_GIGA_MAC_VER_37 || - tp->mac_version == RTL_GIGA_MAC_VER_38 || - tp->mac_version == RTL_GIGA_MAC_VER_40 || - tp->mac_version == RTL_GIGA_MAC_VER_41 || - tp->mac_version == RTL_GIGA_MAC_VER_42 || - tp->mac_version == RTL_GIGA_MAC_VER_43 || - tp->mac_version == RTL_GIGA_MAC_VER_44 || - tp->mac_version == RTL_GIGA_MAC_VER_45 || - tp->mac_version == RTL_GIGA_MAC_VER_46 || - tp->mac_version == RTL_GIGA_MAC_VER_47 || - tp->mac_version == RTL_GIGA_MAC_VER_48 || - tp->mac_version == RTL_GIGA_MAC_VER_49 || - tp->mac_version == RTL_GIGA_MAC_VER_50 || - tp->mac_version == RTL_GIGA_MAC_VER_51) { + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); - } else { + break; + default: RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); udelay(100); + break; } rtl_hw_reset(tp); @@ -7854,29 +7843,18 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u64_stats_init(&tp->tx_stats.syncp); /* Get MAC address */ - if (tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_36 || - tp->mac_version == RTL_GIGA_MAC_VER_37 || - tp->mac_version == RTL_GIGA_MAC_VER_38 || - tp->mac_version == RTL_GIGA_MAC_VER_40 || - tp->mac_version == RTL_GIGA_MAC_VER_41 || - tp->mac_version == RTL_GIGA_MAC_VER_42 || - tp->mac_version == RTL_GIGA_MAC_VER_43 || - tp->mac_version == RTL_GIGA_MAC_VER_44 || - tp->mac_version == RTL_GIGA_MAC_VER_45 || - tp->mac_version == RTL_GIGA_MAC_VER_46 || - tp->mac_version == RTL_GIGA_MAC_VER_47 || - tp->mac_version == RTL_GIGA_MAC_VER_48 || - tp->mac_version == RTL_GIGA_MAC_VER_49 || - tp->mac_version == RTL_GIGA_MAC_VER_50 || - tp->mac_version == RTL_GIGA_MAC_VER_51) { + switch (tp->mac_version) { u16 mac_addr[3]; - + case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); if (is_valid_ether_addr((u8 *)mac_addr)) rtl_rar_set(tp, (u8 *)mac_addr); + break; + default: + break; } for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); -- cgit v1.2.3 From eda40b8cbe8e4b099233ce30535cb4c835896637 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:39:54 +0200 Subject: r8169: drop rtl_generic_op Only two places are left where rtl_generic_op() is used, so we can inline it and simplify the code a little. 
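A minimal sketch of the shape of this change, written outside the driver with placeholder names (generic_op, jumbo_enable_old/new and cfg_lock/cfg_unlock are stand-ins, not the real Cfg9346 helpers): instead of routing an optional callback through a generic wrapper, test the function pointer at the call site, so the surrounding unlock/lock sequence only runs when an operation is actually set.

#include <stdio.h>

struct ops { void (*enable)(void); };

/* Stand-ins for the Cfg9346 unlock/lock register writes. */
static void cfg_unlock(void) { puts("unlock"); }
static void cfg_lock(void)   { puts("lock"); }

/* Before: the wrapper hides the NULL check, so unlock/lock always runs. */
static void generic_op(void (*op)(void))
{
	if (op)
		op();
}

static void jumbo_enable_old(const struct ops *o)
{
	cfg_unlock();
	generic_op(o->enable);
	cfg_lock();
}

/* After: the check is inlined at the call site; nothing happens when
 * no enable operation is set.
 */
static void jumbo_enable_new(const struct ops *o)
{
	if (o->enable) {
		cfg_unlock();
		o->enable();
		cfg_lock();
	}
}

int main(void)
{
	struct ops none = { .enable = NULL };

	jumbo_enable_old(&none);	/* still toggles the config lock */
	jumbo_enable_new(&none);	/* does nothing */
	return 0;
}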
This change also avoids the overhead of unlocking/locking in case the respective operation isn't set. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 41f9b5c2bc0d..7703503e61ff 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4764,13 +4764,6 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) r8168_phy_power_up(tp); } -static void rtl_generic_op(struct rtl8169_private *tp, - void (*op)(struct rtl8169_private *)) -{ - if (op) - op(tp); -} - static void rtl_pll_power_down(struct rtl8169_private *tp) { switch (tp->mac_version) { @@ -4821,16 +4814,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - rtl_generic_op(tp, tp->jumbo_ops.enable); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + if (tp->jumbo_ops.enable) { + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + tp->jumbo_ops.enable(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + } } static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - rtl_generic_op(tp, tp->jumbo_ops.disable); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + if (tp->jumbo_ops.disable) { + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + tp->jumbo_ops.disable(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + } } static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) -- cgit v1.2.3 From ff1d733155da3fdba29e2ba99be428da86070989 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:39:56 +0200 Subject: r8169: improve PCI config space access Some chips have a non-zero function id, however instead of hardcoding the id's (CSIAR_FUNC_NIC and CSIAR_FUNC_NIC2) we can get them dynamically via PCI_FUNC(pci_dev->devfn). This way we can get rid of the csi_ops. In general csi is just a fallback mechanism for PCI config space access in case no native access is supported. Therefore let's try native access first. I checked with Realtek regarding the functionality of config space byte 0x070f and according to them it controls the L0s/L1 entrance latency. Currently ASPM is disabled in general and therefore this value isn't used. However we may introduce a whitelist for chips where ASPM is known to work, therefore let's keep this code. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 132 ++++++++--------------------------- 1 file changed, 29 insertions(+), 103 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7703503e61ff..d72b3fdf853a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -410,13 +410,8 @@ enum rtl8168_8101_registers { CSIAR = 0x68, #define CSIAR_FLAG 0x80000000 #define CSIAR_WRITE_CMD 0x80000000 -#define CSIAR_BYTE_ENABLE 0x0f -#define CSIAR_BYTE_ENABLE_SHIFT 12 -#define CSIAR_ADDR_MASK 0x0fff -#define CSIAR_FUNC_CARD 0x00000000 -#define CSIAR_FUNC_SDIO 0x00010000 -#define CSIAR_FUNC_NIC 0x00020000 -#define CSIAR_FUNC_NIC2 0x00010000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff PMCH = 0x6f, EPHYAR = 0x80, #define EPHYAR_FLAG 0x80000000 @@ -781,11 +776,6 @@ struct rtl8169_private { void (*disable)(struct rtl8169_private *); } jumbo_ops; - struct csi_ops { - void (*write)(struct rtl8169_private *, int, int); - u32 (*read)(struct rtl8169_private *, int); - } csi_ops; - int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); @@ -5196,123 +5186,60 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) RTL_W32(tp, RxMissed, 0); } -static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) -{ - if (tp->csi_ops.write) - tp->csi_ops.write(tp, addr, value); -} - -static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) -{ - return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0; -} - -static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits) -{ - u32 csi; - - csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; - rtl_csi_write(tp, 0x070c, csi | bits); -} - -static void rtl_csi_access_enable_1(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x17000000); -} - -static void rtl_csi_access_enable_2(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x27000000); -} - DECLARE_RTL_COND(rtl_csiar_cond) { return RTL_R32(tp, CSIAR) & CSIAR_FLAG; } -static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) -{ - RTL_W32(tp, CSIDR, value); - RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); - - rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); -} - -static u32 r8169_csi_read(struct rtl8169_private *tp, int addr) +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) { - RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + u32 func = PCI_FUNC(tp->pci_dev->devfn); - return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 
- RTL_R32(tp, CSIDR) : ~0; -} - -static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) -{ RTL_W32(tp, CSIDR, value); RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | - CSIAR_FUNC_NIC); + CSIAR_BYTE_ENABLE | func << 16); rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); } -static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) { - RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? RTL_R32(tp, CSIDR) : ~0; } -static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) +static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) { - RTL_W32(tp, CSIDR, value); - RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | - CSIAR_FUNC_NIC2); + struct pci_dev *pdev = tp->pci_dev; + u32 csi; - rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. + */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); } -static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) +static void rtl_csi_access_enable_1(struct rtl8169_private *tp) { - RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); - - return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? - RTL_R32(tp, CSIDR) : ~0; + rtl_csi_access_enable(tp, 0x17); } -static void rtl_init_csi_ops(struct rtl8169_private *tp) +static void rtl_csi_access_enable_2(struct rtl8169_private *tp) { - struct csi_ops *ops = &tp->csi_ops; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: - case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: - ops->write = NULL; - ops->read = NULL; - break; - - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_38: - ops->write = r8402_csi_write; - ops->read = r8402_csi_read; - break; - - case RTL_GIGA_MAC_VER_44: - ops->write = r8411_csi_write; - ops->read = r8411_csi_read; - break; - - default: - ops->write = r8169_csi_write; - ops->read = r8169_csi_read; - break; - } + rtl_csi_access_enable(tp, 0x27); } struct ephy_info { @@ -7804,7 +7731,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_init_mdio_ops(tp); rtl_init_jumbo_ops(tp); - rtl_init_csi_ops(tp); rtl8169_print_mac_version(tp); -- cgit v1.2.3 From 353af85ed811d84ca349c6d6e5df8055eeab5bd5 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:39:59 +0200 Subject: r8169: avoid potentially misaligned access when getting mac address Interpreting a member of an u16 array as u32 may result in a misaligned access. Also it's not really intuitive to define a mac address variable as array of three u16 words. Therefore use an array of six bytes that is properly aligned for 32 bit access. 
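A minimal userspace sketch of the alignment argument (this is not the driver code; the MAC bytes below are made up, and like the kernel it leans on GCC's byte-array aliasing behaviour): a u16[3] buffer only guarantees 2-byte alignment, so a 32-bit store through its first element can be misaligned on strict architectures, whereas a six-byte array forced to 4-byte alignment makes the same access well defined.

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
	/* Alignment guaranteed by the type, independent of stack layout luck. */
	printf("alignof(uint16_t[3]) = %zu\n", _Alignof(uint16_t[3]));	/* 2 */
	printf("alignof(uint32_t)    = %zu\n", _Alignof(uint32_t));	/* 4 */

	/* Six bytes, explicitly 4-byte aligned, mirroring the patch. */
	uint8_t mac[ETH_ALEN] __attribute__((aligned(4)));

	*(uint32_t *)&mac[0] = 0xd3c2b1a4u;	/* bytes 0..3 in one aligned 32-bit store */
	*(uint16_t *)&mac[4] = 0xf6e5u;		/* bytes 4..5 */

	printf("mac = %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}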
Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index d72b3fdf853a..6fce3cc86ae3 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7767,14 +7767,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Get MAC address */ switch (tp->mac_version) { - u16 mac_addr[3]; + u8 mac_addr[ETH_ALEN] __aligned(4); case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); - *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); - if (is_valid_ether_addr((u8 *)mac_addr)) - rtl_rar_set(tp, (u8 *)mac_addr); + if (is_valid_ether_addr(mac_addr)) + rtl_rar_set(tp, mac_addr); break; default: break; -- cgit v1.2.3 From 4ff36466281428734791d3cc6331b8cca7c76019 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 2 May 2018 21:40:02 +0200 Subject: r8169: replace get_protocol with vlan_get_protocol This patch is basically the same as 6e74d1749a33 ("r8152: replace get_protocol with vlan_get_protocol"). Use vlan_get_protocol instead of duplicating the functionality. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 6fce3cc86ae3..6d99b141a7aa 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6510,18 +6510,6 @@ static int msdn_giant_send_check(struct sk_buff *skb) return ret; } -static inline __be16 get_protocol(struct sk_buff *skb) -{ - __be16 protocol; - - if (skb->protocol == htons(ETH_P_8021Q)) - protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; - else - protocol = skb->protocol; - - return protocol; -} - static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp, struct sk_buff *skb, u32 *opts) { @@ -6558,7 +6546,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, return false; } - switch (get_protocol(skb)) { + switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts[0] |= TD1_GTSENV4; break; @@ -6590,7 +6578,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, return false; } - switch (get_protocol(skb)) { + switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts[1] |= TD1_IPv4_CS; ip_protocol = ip_hdr(skb)->protocol; -- cgit v1.2.3 From 5f110899ae1494d47678f88e3c53c4c5f67077c4 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 3 May 2018 11:54:23 +0530 Subject: cxgb4: update latest firmware version supported Change t4fw_version.h to update latest firmware version number to 1.19.1.0. Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 123e2c1b65f5..4eb15ceddca3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x10 -#define T4FW_VERSION_MICRO 0x3F +#define T4FW_VERSION_MINOR 0x13 +#define T4FW_VERSION_MICRO 0x01 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x10 -#define T5FW_VERSION_MICRO 0x3F +#define T5FW_VERSION_MINOR 0x13 +#define T5FW_VERSION_MICRO 0x01 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x10 -#define T6FW_VERSION_MICRO 0x3F +#define T6FW_VERSION_MINOR 0x13 +#define T6FW_VERSION_MICRO 0x01 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 -- cgit v1.2.3 From 9b934a3bdc8778e9cd92f65d0d08c17bb005091e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 3 May 2018 14:59:39 +0300 Subject: mlxsw: resources: Add CQE versions resources Add resources that FW uses to report supported CQE versions. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/resources.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 087aad52c195..fd9299ccec72 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -43,6 +43,9 @@ enum mlxsw_res_id { MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, MLXSW_RES_ID_MAX_TRAP_GROUPS, + MLXSW_RES_ID_CQE_V0, + MLXSW_RES_ID_CQE_V1, + MLXSW_RES_ID_CQE_V2, MLXSW_RES_ID_COUNTER_POOL_SIZE, MLXSW_RES_ID_MAX_SPAN, MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES, @@ -81,6 +84,9 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, + [MLXSW_RES_ID_CQE_V0] = 0x2210, + [MLXSW_RES_ID_CQE_V1] = 0x2211, + [MLXSW_RES_ID_CQE_V2] = 0x2212, [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410, [MLXSW_RES_ID_MAX_SPAN] = 0x2420, [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443, -- cgit v1.2.3 From b76550bbed9f9aa10b01b89698f6a5c55509b070 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 3 May 2018 14:59:40 +0300 Subject: mlxsw: pci: Introduce helpers to work with multiple CQE versions Introduce definitions of fields in CQE version 1 and 2. Also, introduce common helpers that would call appropriate version-specific helpers according to the version enum passed. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/pci.c | 72 +++++++++++++++------------ drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 74 ++++++++++++++++++++++++---- 2 files changed, 104 insertions(+), 42 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 3a9381977d6d..24686ba45729 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -117,6 +117,7 @@ struct mlxsw_pci_queue { struct { u32 comp_sdq_count; u32 comp_rdq_count; + enum mlxsw_pci_cqe_v v; } cq; struct { u32 ev_cmd_count; @@ -202,24 +203,6 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit) return owner_bit != !!(q->consumer_counter & q->count); } -static char * -mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q, - u32 (*get_elem_owner_func)(const char *)) -{ - struct mlxsw_pci_queue_elem_info *elem_info; - char *elem; - bool owner_bit; - - elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); - elem = elem_info->elem; - owner_bit = get_elem_owner_func(elem); - if (mlxsw_pci_elem_hw_owned(q, owner_bit)) - return NULL; - q->consumer_counter++; - rmb(); /* make sure we read owned bit before the rest of elem */ - return elem; -} - static struct mlxsw_pci_queue_type_group * mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci, enum mlxsw_pci_queue_type q_type) @@ -505,7 +488,7 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, for (i = 0; i < q->count; i++) { char *elem = mlxsw_pci_queue_elem_get(q, i); - mlxsw_pci_cqe_owner_set(elem, 1); + mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1); } mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */ @@ -559,7 +542,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q, u16 consumer_counter_limit, - char *cqe) + enum mlxsw_pci_cqe_v cqe_v, char *cqe) { struct pci_dev *pdev = mlxsw_pci->pdev; struct mlxsw_pci_queue_elem_info *elem_info; @@ -579,10 +562,11 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); - if (mlxsw_pci_cqe_lag_get(cqe)) { + if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) { rx_info.is_lag = true; - rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe); - rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe); + rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe); + rx_info.lag_port_index = + mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe); } else { rx_info.is_lag = false; rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe); @@ -591,7 +575,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); byte_count = mlxsw_pci_cqe_byte_count_get(cqe); - if (mlxsw_pci_cqe_crc_get(cqe)) + if (mlxsw_pci_cqe_crc_get(cqe_v, cqe)) byte_count -= ETH_FCS_LEN; skb_put(skb, byte_count); mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); @@ -608,7 +592,18 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) { - return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get); + struct mlxsw_pci_queue_elem_info *elem_info; + char *elem; + bool owner_bit; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + elem = elem_info->elem; + owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem); + 
if (mlxsw_pci_elem_hw_owned(q, owner_bit)) + return NULL; + q->consumer_counter++; + rmb(); /* make sure we read owned bit before the rest of elem */ + return elem; } static void mlxsw_pci_cq_tasklet(unsigned long data) @@ -621,8 +616,8 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); - u8 sendq = mlxsw_pci_cqe_sr_get(cqe); - u8 dqn = mlxsw_pci_cqe_dqn_get(cqe); + u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); + u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); if (sendq) { struct mlxsw_pci_queue *sdq; @@ -636,7 +631,7 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, - wqe_counter, cqe); + wqe_counter, q->u.cq.v, cqe); q->u.cq.comp_rdq_count++; } if (++items == credits) @@ -696,7 +691,18 @@ static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe) static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q) { - return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get); + struct mlxsw_pci_queue_elem_info *elem_info; + char *elem; + bool owner_bit; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + elem = elem_info->elem; + owner_bit = mlxsw_pci_eqe_owner_get(elem); + if (mlxsw_pci_elem_hw_owned(q, owner_bit)) + return NULL; + q->consumer_counter++; + rmb(); /* make sure we read owned bit before the rest of elem */ + return elem; } static void mlxsw_pci_eq_tasklet(unsigned long data) @@ -779,8 +785,8 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { .init = mlxsw_pci_cq_init, .fini = mlxsw_pci_cq_fini, .tasklet = mlxsw_pci_cq_tasklet, - .elem_count = MLXSW_PCI_CQE_COUNT, - .elem_size = MLXSW_PCI_CQE_SIZE + .elem_count = MLXSW_PCI_CQE01_COUNT, + .elem_size = MLXSW_PCI_CQE01_SIZE }; static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { @@ -800,6 +806,8 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, int i; int err; + q->u.cq.v = MLXSW_PCI_CQE_V0; + spin_lock_init(&q->lock); q->num = q_num; q->count = q_ops->elem_count; @@ -938,7 +946,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) || (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) || - (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) || + (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) || (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) { dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n"); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index fb082ad21b00..963155f6a17a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -82,10 +82,12 @@ #define MLXSW_PCI_AQ_PAGES 8 #define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES) #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ -#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) -#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE) +#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) +#define MLXSW_PCI_CQE2_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE) #define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE) #define 
MLXSW_PCI_EQE_UPDATE_COUNT 0x80 @@ -126,10 +128,48 @@ MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false); */ MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false); +enum mlxsw_pci_cqe_v { + MLXSW_PCI_CQE_V0, + MLXSW_PCI_CQE_V1, + MLXSW_PCI_CQE_V2, +}; + +#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2) \ +static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \ +{ \ + switch (v) { \ + default: \ + case MLXSW_PCI_CQE_V0: \ + return mlxsw_pci_cqe##v0##_##name##_get(cqe); \ + case MLXSW_PCI_CQE_V1: \ + return mlxsw_pci_cqe##v1##_##name##_get(cqe); \ + case MLXSW_PCI_CQE_V2: \ + return mlxsw_pci_cqe##v2##_##name##_get(cqe); \ + } \ +} \ +static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v, \ + char *cqe, u32 val) \ +{ \ + switch (v) { \ + default: \ + case MLXSW_PCI_CQE_V0: \ + mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \ + break; \ + case MLXSW_PCI_CQE_V1: \ + mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \ + break; \ + case MLXSW_PCI_CQE_V2: \ + mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \ + break; \ + } \ +} + /* pci_cqe_lag * Packet arrives from a port which is a LAG */ -MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); +MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1); +MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1); +mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12); /* pci_cqe_system_port/lag_id * When lag=0: System port on which the packet was received @@ -138,8 +178,12 @@ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); * bits [3:0] sub_port on which the packet was received */ MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); -MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12); -MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4); +MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12); +MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16); +mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12); +MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4); +MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8); +mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12); /* pci_cqe_wqe_counter * WQE count of the WQEs completed on the associated dqn @@ -162,28 +206,38 @@ MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9); * Length include CRC. Indicates the length field includes * the packet's CRC. */ -MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1); +MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1); +MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1); +mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12); /* pci_cqe_e * CQE with Error. */ -MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1); +MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1); +MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1); +mlxsw_pci_cqe_item_helpers(e, 0, 12, 12); /* pci_cqe_sr * 1 - Send Queue * 0 - Receive Queue */ -MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1); +MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1); +MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1); +mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12); /* pci_cqe_dqn * Descriptor Queue (DQ) Number. */ -MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5); +MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5); +MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6); +mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12); /* pci_cqe_owner * Ownership bit. */ -MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1); +MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1); +MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1); +mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2); /* pci_eqe_event_type * Event type. 
-- cgit v1.2.3 From 8404f6f2e8ed2bd667c0c95ee40561b63e0800f9 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 3 May 2018 14:59:41 +0300 Subject: mlxsw: pci: Allow to use CQEs of version 1 and version 2 Use previously added resources to query FW support for multiple versions of CQEs. Use the biggest version supported. For SDQs, it has no sense to use version 2 as it does not introduce any new features, but it is twice the size of CQE version 1. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 24 ++++++++-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 78 +++++++++++++++++++++++++++---- 2 files changed, 91 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 479511cf79bc..669226b0759c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -662,6 +662,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1); */ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1); +/* cmd_mbox_config_set_cqe_version + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1); + /* cmd_mbox_config_profile_max_vepa_channels * Maximum number of VEPA channels per port (0 through 16) * 0 - multi-channel VEPA is disabled @@ -841,6 +847,14 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type, MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties, 0x60, 0, 8, 0x08, 0x00, false); +/* cmd_mbox_config_profile_cqe_version + * CQE version: + * 0: CQE version is 0 + * 1: CQE version is either 1 or 2 + * CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8); + /* ACCESS_REG - Access EMAD Supported Register * ---------------------------------- * OpMod == 0 (N/A), INMmod == 0 (N/A) @@ -1032,11 +1046,15 @@ static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core, 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE); } -/* cmd_mbox_sw2hw_cq_cv +enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver { + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2, +}; + +/* cmd_mbox_sw2hw_cq_cqe_ver * CQE Version. - * 0 - CQE Version 0, 1 - CQE Version 1 */ -MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4); +MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4); /* cmd_mbox_sw2hw_cq_c_eqn * Event Queue this CQ reports completion events to. 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 24686ba45729..e9ce0e27aa9c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -156,6 +156,8 @@ struct mlxsw_pci { } cmd; struct mlxsw_bus_info bus_info; const struct pci_device_id *id; + enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */ + u8 num_sdq_cqs; /* Number of CQs used for SDQs */ }; static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) @@ -477,6 +479,17 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci, } } +static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + q->u.cq.v = mlxsw_pci->max_cqe_ver; + + /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */ + if (q->u.cq.v == MLXSW_PCI_CQE_V2 && + q->num < mlxsw_pci->num_sdq_cqs) + q->u.cq.v = MLXSW_PCI_CQE_V1; +} + static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { @@ -491,7 +504,13 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1); } - mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */ + if (q->u.cq.v == MLXSW_PCI_CQE_V1) + mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1); + else if (q->u.cq.v == MLXSW_PCI_CQE_V2) + mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2); + mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); @@ -643,6 +662,18 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) } } +static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) +{ + return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT : + MLXSW_PCI_CQE01_COUNT; +} + +static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q) +{ + return q->u.cq.v == MLXSW_PCI_CQE_V2 ? 
MLXSW_PCI_CQE2_SIZE : + MLXSW_PCI_CQE01_SIZE; +} + static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { @@ -755,11 +786,15 @@ static void mlxsw_pci_eq_tasklet(unsigned long data) struct mlxsw_pci_queue_ops { const char *name; enum mlxsw_pci_queue_type type; + void (*pre_init)(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q); int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q); void (*fini)(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q); void (*tasklet)(unsigned long data); + u16 (*elem_count_f)(const struct mlxsw_pci_queue *q); + u8 (*elem_size_f)(const struct mlxsw_pci_queue *q); u16 elem_count; u8 elem_size; }; @@ -782,11 +817,12 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = { static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { .type = MLXSW_PCI_QUEUE_TYPE_CQ, + .pre_init = mlxsw_pci_cq_pre_init, .init = mlxsw_pci_cq_init, .fini = mlxsw_pci_cq_fini, .tasklet = mlxsw_pci_cq_tasklet, - .elem_count = MLXSW_PCI_CQE01_COUNT, - .elem_size = MLXSW_PCI_CQE01_SIZE + .elem_count_f = mlxsw_pci_cq_elem_count, + .elem_size_f = mlxsw_pci_cq_elem_size }; static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { @@ -806,12 +842,15 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, int i; int err; - q->u.cq.v = MLXSW_PCI_CQE_V0; + q->num = q_num; + if (q_ops->pre_init) + q_ops->pre_init(mlxsw_pci, q); spin_lock_init(&q->lock); - q->num = q_num; - q->count = q_ops->elem_count; - q->elem_size = q_ops->elem_size; + q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) : + q_ops->elem_count; + q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) : + q_ops->elem_size; q->type = q_ops->type; q->pci = mlxsw_pci; @@ -840,7 +879,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, elem_info = mlxsw_pci_queue_elem_info_get(q, i); elem_info->elem = - __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i); + __mlxsw_pci_queue_elem_get(q, q->elem_size, i); } mlxsw_cmd_mbox_zero(mbox); @@ -952,6 +991,8 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) return -EINVAL; } + mlxsw_pci->num_sdq_cqs = num_sdqs; + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops, num_eqs); if (err) { @@ -1192,6 +1233,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, &profile->swid_config[i]); + if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) { + mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1); + mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1); + } + return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox); } @@ -1386,6 +1432,21 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_query_resources; + if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) && + MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2)) + mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2; + else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) && + MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1)) + mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1; + else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) && + MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) || + !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) { + mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0; + } else { + dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n"); + goto err_cqe_v_check; + } + err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res); if (err) goto err_config_profile; @@ 
-1408,6 +1469,7 @@ err_request_eq_irq: mlxsw_pci_aqs_fini(mlxsw_pci); err_aqs_init: err_config_profile: +err_cqe_v_check: err_query_resources: err_boardinfo: mlxsw_pci_fw_area_fini(mlxsw_pci); -- cgit v1.2.3 From 41107685b9ce3c204be4ba784c3dd6baf9e4bcf4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 3 May 2018 14:59:42 +0300 Subject: mlxsw: pci: Check number of CQEs for CQE version 2 Check number of CQEs for CQE version 2 reported by QUERY_AQ_CAP command. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 7 ++++++- drivers/net/ethernet/mellanox/mlxsw/pci.c | 4 ++++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 669226b0759c..8da91b023b13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -424,10 +424,15 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8); MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8); /* cmd_mbox_query_aq_cap_log_max_cq_sz - * Log (base 2) of max CQEs allowed on CQ. + * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv0 and CQEv1. */ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8); +/* cmd_mbox_query_aq_cap_log_max_cqv2_sz + * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv2. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8); + /* cmd_mbox_query_aq_cap_max_num_cqs * Maximum number of CQs. */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index e9ce0e27aa9c..db794a1a3a7e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -959,6 +959,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) u8 rdq_log2sz; u8 num_cqs; u8 cq_log2sz; + u8 cqv2_log2sz; u8 num_eqs; u8 eq_log2sz; int err; @@ -974,6 +975,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox); num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox); cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox); + cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox); num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); @@ -986,6 +988,8 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) || (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) || (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) || + (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 && + (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) || (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) { dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n"); return -EINVAL; -- cgit v1.2.3 From 816a3bed9549fcc0c90ba97c9077e64e734f0df6 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 3 May 2018 14:43:46 +0200 Subject: switchdev: Add fdb.added_by_user to switchdev notifications The following patch enables sending notifications also for events on FDB entries that weren't added by the user. Give the drivers the information necessary to distinguish between the two origins of FDB entries. To maintain the current behavior, have switchdev-implementing drivers bail out on notifications about non-user-added FDB entries. 
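A minimal userspace sketch of the driver-side pattern this enables (struct and function names here are placeholders, not the kernel API): the notification now carries an added_by_user flag, and a driver that only wants to offload user-configured entries simply returns early when the flag is clear.

#include <stdbool.h>
#include <stdio.h>

/* Shape of the extended notification payload. */
struct fdb_info {
	const unsigned char *addr;
	unsigned short vid;
	bool added_by_user;	/* the new field */
};

/* Placeholder for programming the entry into the switch ASIC. */
static void offload_fdb(const struct fdb_info *fdb, bool add)
{
	printf("%s %02x:... vid %u in HW\n", add ? "add" : "del",
	       (unsigned int)fdb->addr[0], (unsigned int)fdb->vid);
}

/* Driver notifier handler: keep today's behaviour by ignoring entries
 * the bridge learned on its own (added_by_user == false).
 */
static void drv_fdb_event(const struct fdb_info *fdb, bool add)
{
	if (!fdb->added_by_user)
		return;
	offload_fdb(fdb, add);
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct fdb_info learned = { mac, 10, false };
	struct fdb_info user    = { mac, 10, true };

	drv_fdb_event(&learned, true);	/* ignored */
	drv_fdb_event(&user, true);	/* offloaded */
	return 0;
}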
In case of mlxsw driver, allow a call to mlxsw_sp_span_respin() so that SPAN over bridge catches up with the changed FDB. Signed-off-by: Petr Machata Reviewed-by: Nikolay Aleksandrov Acked-by: Ivan Vecera Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 4 ++++ drivers/net/ethernet/rocker/rocker_main.c | 2 ++ include/net/switchdev.h | 1 + net/bridge/br_switchdev.c | 10 +++++++--- net/dsa/slave.c | 5 ++++- 5 files changed, 18 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 1af99fe5fd32..3973d90cc908 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -2270,6 +2270,8 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; + if (!fdb_info->added_by_user) + break; err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true); if (err) break; @@ -2279,6 +2281,8 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work) break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; + if (!fdb_info->added_by_user) + break; mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); break; case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 056cb6093630..152d6948611c 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2783,6 +2783,8 @@ static int rocker_switchdev_event(struct notifier_block *unused, switch (event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (!fdb_info->added_by_user) + break; memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 39bc855d7fee..d574ce63bf22 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -155,6 +155,7 @@ struct switchdev_notifier_fdb_info { struct switchdev_notifier_info info; /* must be first */ const unsigned char *addr; u16 vid; + bool added_by_user; }; static inline struct net_device * diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c index ee775f4ff76c..71a03c487c4b 100644 --- a/net/bridge/br_switchdev.c +++ b/net/bridge/br_switchdev.c @@ -102,13 +102,15 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p, static void br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac, - u16 vid, struct net_device *dev) + u16 vid, struct net_device *dev, + bool added_by_user) { struct switchdev_notifier_fdb_info info; unsigned long notifier_type; info.addr = mac; info.vid = vid; + info.added_by_user = added_by_user; notifier_type = adding ? 
SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE; call_switchdev_notifiers(notifier_type, dev, &info.info); } @@ -123,12 +125,14 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type) case RTM_DELNEIGH: br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr, fdb->key.vlan_id, - fdb->dst->dev); + fdb->dst->dev, + fdb->added_by_user); break; case RTM_NEWNEIGH: br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr, fdb->key.vlan_id, - fdb->dst->dev); + fdb->dst->dev, + fdb->added_by_user); break; } } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index f3fb3a0880b1..c287f1ef964c 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1441,6 +1441,7 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_fdb_info *fdb_info = ptr; struct dsa_switchdev_event_work *switchdev_work; if (!dsa_slave_dev_check(dev)) @@ -1458,8 +1459,10 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused, switch (event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (!fdb_info->added_by_user) + break; if (dsa_slave_switchdev_fdb_work_init(switchdev_work, - ptr)) + fdb_info)) goto err_fdb_work_init; dev_hold(dev); break; -- cgit v1.2.3 From 9c87758cf0893d6d3b51aac34546807b138cb3e7 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 30 Apr 2018 15:19:16 +0200 Subject: rt2x00: call sta_add/remove directly in rt2800 Only rt2800 subdriver of rt2x00 implement sta_add() and sta_remove(), we do not need generic version of those. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 11 +++++++---- drivers/net/wireless/ralink/rt2x00/rt2800lib.h | 5 +++-- drivers/net/wireless/ralink/rt2x00/rt2800pci.c | 6 ++---- drivers/net/wireless/ralink/rt2x00/rt2800soc.c | 6 ++---- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 6 ++---- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 4 ---- 6 files changed, 16 insertions(+), 22 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index e827dc522580..a567bc273ffc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1557,12 +1557,13 @@ static void rt2800_set_max_psdu_len(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); } -int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, +int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - int wcid; - struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); + struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); + int wcid; /* * Limit global maximum TX AMPDU length to smallest value of all @@ -1608,8 +1609,10 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(rt2800_sta_add); -int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta) +int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { + struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); int wcid = sta_priv->wcid; diff --git 
a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 275e3969abdd..51d9c2a932cc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -208,9 +208,10 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key); -int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, +int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta); +int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags); void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 1172eefd1c1a..71b1affc3885 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -311,8 +311,8 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = { .get_stats = rt2x00mac_get_stats, .get_key_seq = rt2800_get_key_seq, .set_rts_threshold = rt2800_set_rts_threshold, - .sta_add = rt2x00mac_sta_add, - .sta_remove = rt2x00mac_sta_remove, + .sta_add = rt2800_sta_add, + .sta_remove = rt2800_sta_remove, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, @@ -377,8 +377,6 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .config_erp = rt2800_config_erp, .config_ant = rt2800_config_ant, .config = rt2800_config, - .sta_add = rt2800_sta_add, - .sta_remove = rt2800_sta_remove, }; static const struct rt2x00_ops rt2800pci_ops = { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c index 6848ebc83534..a502816214ab 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c @@ -150,8 +150,8 @@ static const struct ieee80211_ops rt2800soc_mac80211_ops = { .get_stats = rt2x00mac_get_stats, .get_key_seq = rt2800_get_key_seq, .set_rts_threshold = rt2800_set_rts_threshold, - .sta_add = rt2x00mac_sta_add, - .sta_remove = rt2x00mac_sta_remove, + .sta_add = rt2800_sta_add, + .sta_remove = rt2800_sta_remove, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, @@ -216,8 +216,6 @@ static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = { .config_erp = rt2800_config_erp, .config_ant = rt2800_config_ant, .config = rt2800_config, - .sta_add = rt2800_sta_add, - .sta_remove = rt2800_sta_remove, }; static const struct rt2x00_ops rt2800soc_ops = { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index d901a41d36e4..98a7313fea4a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -797,8 +797,8 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = { .get_stats = rt2x00mac_get_stats, .get_key_seq = rt2800_get_key_seq, .set_rts_threshold = rt2800_set_rts_threshold, - .sta_add = rt2x00mac_sta_add, - .sta_remove = rt2x00mac_sta_remove, + .sta_add = rt2800_sta_add, + .sta_remove = rt2800_sta_remove, .bss_info_changed = 
rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, @@ -858,8 +858,6 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .config_erp = rt2800_config_erp, .config_ant = rt2800_config_ant, .config = rt2800_config, - .sta_add = rt2800_sta_add, - .sta_remove = rt2800_sta_remove, }; static void rt2800usb_queue_init(struct data_queue *queue) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 1f38c338ca7a..a279a4363bc1 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -1457,10 +1457,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, #else #define rt2x00mac_set_key NULL #endif /* CONFIG_RT2X00_LIB_CRYPTO */ -int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr); -- cgit v1.2.3 From 811a3991510735566b66069fdd4ce3ce33a2ec18 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 30 Apr 2018 15:19:17 +0200 Subject: rt2x00: check against flushing empty queue We have check if queue is not empty when start flushing queues on by mac80211 callback, but we also can start flushing queues by internal driver calls. So move check into rt2x00queue_flush_queue() to assure we do not flush empty queue anytime. Additionally add warning if we start to kick empty queue as in such situation we set wrong index in the HW queue, what can confuse the HW and have various negative consequences. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800mmio.c | 1 + drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 3 +-- drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c index 1123e2bed803..e1a7ed7e4892 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c @@ -600,6 +600,7 @@ void rt2800mmio_kick_queue(struct data_queue *queue) case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: + WARN_ON_ONCE(rt2x00queue_empty(queue)); entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid), entry->entry_idx); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index a971bc7a6b63..c380c1f56ba6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -739,8 +739,7 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return; tx_queue_for_each(rt2x00dev, queue) - if (!rt2x00queue_empty(queue)) - rt2x00queue_flush_queue(queue, drop); + rt2x00queue_flush_queue(queue, drop); } EXPORT_SYMBOL_GPL(rt2x00mac_flush); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index a6884e73d2ab..7c1f8f561d4a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -1000,6 +1000,8 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) (queue->qid == QID_AC_BE) || (queue->qid == QID_AC_BK); + if 
(rt2x00queue_empty(queue)) + return; /* * If we are not supposed to drop any pending -- cgit v1.2.3 From 070fc356e21addb579edcd7a1f2ef692b29a78e5 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 30 Apr 2018 14:41:02 +0100 Subject: rtlwifi: fix spelling mistake: "dismatch" -> "mismatch" Trivial fix to spelling mistake in RT_TRACE message text. Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c | 2 +- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c | 2 +- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index 8fce371749d3..f22fec093f1d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -1783,7 +1783,7 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, bool scan = false, link = false, roam = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], PsTdma type dismatch!!!, "); + "[BTCoex], PsTdma type mismatch!!!, "); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "curPsTdma=%d, recordPsTdma=%d\n", coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 73ec31972944..279fe01bb55e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -2766,7 +2766,7 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) { bool scan = false, link = false, roam = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", + "[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index 2202d5e18977..01a9d303603b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -2614,7 +2614,7 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, bool scan = false, link = false, roam = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", + "[BTCoex], PsTdma type mismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); -- cgit v1.2.3 From 8efb868566db107d6d0130ea61a653a29f851661 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 1 May 2018 10:33:54 +0200 Subject: mt76x2: remove unnecessary break in mt76x2_mac_process_tx_rate() Remove unnecessary break statement in the default case of bw switch block in mt76x2_mac_process_tx_rate routine Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_mac.c | 1 - 1 file changed, 1 deletion(-) (limited to 
'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index d18315652583..dab713756004 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -410,7 +410,6 @@ mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, break; default: return -EINVAL; - break; } if (rate & MT_RXWI_RATE_SGI) -- cgit v1.2.3 From 7e7939e80e3cda3e5677c3905d20c066bfb22ef2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 1 May 2018 10:36:43 +0100 Subject: ipw2200: fix spelling mistake: "functionalitis" -> "functionalities" Trivial fix to spelling mistake in module parameter description text Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 87a5e414c2f7..ba3fb1d2ddb4 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -12012,7 +12012,7 @@ MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)" #ifdef CONFIG_IPW2200_QOS module_param(qos_enable, int, 0444); -MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis"); +MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities"); module_param(qos_burst_enable, int, 0444); MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode"); -- cgit v1.2.3 From c990affd5abce1f338ac52539e092dad04647fb6 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 2 May 2018 11:46:36 +0200 Subject: mt76x2: fix avg_rssi estimation Add leftover filter coefficients in IIR rssi estimation Fixes: 7bc04215a66b ("mt76: add driver code for MT76x2e") Signed-off-by: Lorenzo Bianconi Acked-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 038d1fe6c726..c1c38ca3330a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -508,8 +508,10 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) u8 gain_delta; int low_gain; - dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + (rssi0 << 8); - dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + (rssi1 << 8); + dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + + (rssi0 << 8) / 16; + dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + + (rssi1 << 8) / 16; dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] + dev->cal.avg_rssi[1]) / 512; -- cgit v1.2.3 From 77cb065fde0a179f252caabfad5466cf9c522572 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 2 May 2018 22:54:48 +0300 Subject: sh_eth: use TSU register accessors for TSU_POST There's no particularly good reason TSU_POST registers get accessed circumventing sh_eth_tsu_{read|write}() -- start using those, removing (badly named) sh_eth_tsu_get_post_reg_offset(), while at it... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index b6b90a6314e3..e2e8838fa981 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2610,12 +2610,6 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu) } /* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ -static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, - int entry) -{ - return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); -} - static u32 sh_eth_tsu_get_post_mask(int entry) { return 0x0f << (28 - ((entry % 8) * 4)); @@ -2630,27 +2624,25 @@ static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev, int entry) { struct sh_eth_private *mdp = netdev_priv(ndev); + int reg = TSU_POST1 + entry / 8; u32 tmp; - void *reg_offset; - reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); - tmp = ioread32(reg_offset); - iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); + tmp = sh_eth_tsu_read(mdp, reg); + sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg); } static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, int entry) { struct sh_eth_private *mdp = netdev_priv(ndev); + int reg = TSU_POST1 + entry / 8; u32 post_mask, ref_mask, tmp; - void *reg_offset; - reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); post_mask = sh_eth_tsu_get_post_mask(entry); ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; - tmp = ioread32(reg_offset); - iowrite32(tmp & ~post_mask, reg_offset); + tmp = sh_eth_tsu_read(mdp, reg); + sh_eth_tsu_write(mdp, tmp & ~post_mask, reg); /* If other port enables, the function returns "true" */ return tmp & ref_mask; -- cgit v1.2.3 From 627a0d20fa7b9e05ea4f42f47ea84d450dbbc96f Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 2 May 2018 22:55:52 +0300 Subject: sh_eth: WARN_ON() access to unimplemented TSU register Commit 3365711df024 ("sh_eth: WARN on access to a register not implemented in a particular chip") added WARN_ON() to sh_eth_{read|write}() but not to sh_eth_tsu_{read|write}(). Now that we've routed almost all TSU register accesses (except TSU_ADR{H|L} -- which are special) thru the latter pair of accessors, it makes sense to check for the unimplemented TSU registers as well... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e2e8838fa981..5970d9e5ddf1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -442,12 +442,22 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, int enum_index) { - iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->tsu_addr + offset); } static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) { - return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; + + return ioread32(mdp->tsu_addr + offset); } static void sh_eth_select_mii(struct net_device *ndev) -- cgit v1.2.3 From 2d943adf860302a4c7bfee498e64ded9fe7d9dba Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2018 08:49:29 -0700 Subject: net/mlx4_en: optimizes get_fixed_ipv6_csum() While trying to support CHECKSUM_COMPLETE for IPV6 fragments, I had to experiments various hacks in get_fixed_ipv6_csum(). I must admit I could not find how to implement this :/ However, get_fixed_ipv6_csum() does a lot of redundant operations, calling csum_partial() twice. First csum_partial() computes the checksum of saddr and daddr, put in @csum_pseudo_hdr. Undone later in the second csum_partial() computed on whole ipv6 header. Then nexthdr is added once, added a second time, then substracted. payload_len is added once, then substracted. Really all this can be reduced to two add_csum(), to add back 6 bytes that were removed by mlx4 when providing hw_checksum in RX descriptor. Signed-off-by: Eric Dumazet Cc: Saeed Mahameed Cc: Tariq Toukan Reviewed-by: Saeed Mahameed Acked-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index efc55feddc5c..9f54ccbddea7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -593,30 +593,25 @@ static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, } #if IS_ENABLED(CONFIG_IPV6) -/* In IPv6 packets, besides subtracting the pseudo header checksum, - * we also compute/add the IP header checksum which - * is not added by the HW. +/* In IPv6 packets, hw_checksum lacks 6 bytes from IPv6 header: + * 4 first bytes : priority, version, flow_lbl + * and 2 additional bytes : nexthdr, hop_limit. 
*/ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, struct ipv6hdr *ipv6h) { __u8 nexthdr = ipv6h->nexthdr; - __wsum csum_pseudo_hdr = 0; + __wsum temp; if (unlikely(nexthdr == IPPROTO_FRAGMENT || nexthdr == IPPROTO_HOPOPTS || nexthdr == IPPROTO_SCTP)) return -1; - hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr)); - csum_pseudo_hdr = csum_partial(&ipv6h->saddr, - sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); - csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); - csum_pseudo_hdr = csum_add(csum_pseudo_hdr, - (__force __wsum)htons(nexthdr)); - - skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); - skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); + /* priority, version, flow_lbl */ + temp = csum_add(hw_checksum, *(__wsum *)ipv6h); + /* nexthdr and hop_limit */ + skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr); return 0; } #endif -- cgit v1.2.3 From d034294e3c5bec1d5b5c6338991b29d19c820ea0 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 3 Apr 2018 14:03:52 +0300 Subject: net/mlx5: Decrease level of prints about non-existent MKEY User-controlled application can cause multiple prints as below to flood dmesg. Since knowledge of failed MKey release is important for debug, let's decrease its level to debug. mlx5_core 0000:00:04.0: mlx5_core_destroy_mkey:127:(pid 2352): failed radix tree delete of mkey 0x1ed700 Reported-by: Noa Osherovich Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index b9736f505bdf..f4f02f775c93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -123,8 +123,8 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key)); write_unlock_irqrestore(&table->lock, flags); if (!deleted_mkey) { - mlx5_core_warn(dev, "failed radix tree delete of mkey 0x%x\n", - mlx5_base_mkey(mkey->key)); + mlx5_core_dbg(dev, "failed radix tree delete of mkey 0x%x\n", + mlx5_base_mkey(mkey->key)); return -ENOENT; } -- cgit v1.2.3 From ed644fab19db05902243f3ebf9e1245c4c79bebb Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Sat, 4 Nov 2017 07:18:28 +0200 Subject: net/mlx5: Refactor num of blocks in mailbox calculation Get the logic that calculates the number of blocks in a command mailbox into a dedicated function. 
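A minimal stand-alone sketch of the arithmetic this helper centralizes follows; the 16-byte inline area and 512-byte block size are assumed example values standing in for sizeof(msg->first.data) and MLX5_CMD_DATA_BLOCK_SIZE, not constants taken from the driver:

/*
 * Sketch only: count how many chained mailbox blocks a command message
 * of a given length needs once the inline area of the first block is used.
 */
#include <stdio.h>

#define INLINE_DATA_SIZE 16	/* assumed bytes carried inline in the first block */
#define DATA_BLOCK_SIZE  512	/* assumed payload bytes per chained mailbox block */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int calc_cmd_blocks(int len)
{
	int inline_len = len < INLINE_DATA_SIZE ? len : INLINE_DATA_SIZE;
	int blen = len - inline_len;	/* bytes that spill into mailbox blocks */

	return DIV_ROUND_UP(blen, DATA_BLOCK_SIZE);
}

int main(void)
{
	/* 16 bytes fit inline (0 blocks); 1040 bytes need 2 blocks */
	printf("%d %d\n", calc_cmd_blocks(16), calc_cmd_blocks(1040));
	return 0;
}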
Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 21cd1703a862..0f7062104ad9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -135,6 +135,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) return cmd->cmd_buf + (idx << cmd->log_stride); } +static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg) +{ + int size = msg->len; + int blen = size - min_t(int, sizeof(msg->first.data), size); + + return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE); +} + static u8 xor8_buf(void *buf, size_t offset, int len) { u8 *ptr = buf; @@ -174,10 +182,7 @@ static void calc_block_sig(struct mlx5_cmd_prot_block *block) static void calc_chain_sig(struct mlx5_cmd_msg *msg) { struct mlx5_cmd_mailbox *next = msg->next; - int size = msg->len; - int blen = size - min_t(int, sizeof(msg->first.data), size); - int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) - / MLX5_CMD_DATA_BLOCK_SIZE; + int n = mlx5_calc_cmd_blocks(msg); int i = 0; for (i = 0; i < n && next; i++) { @@ -220,12 +225,9 @@ static void free_cmd(struct mlx5_cmd_work_ent *ent) static int verify_signature(struct mlx5_cmd_work_ent *ent) { struct mlx5_cmd_mailbox *next = ent->out->next; + int n = mlx5_calc_cmd_blocks(ent->out); int err; u8 sig; - int size = ent->out->len; - int blen = size - min_t(int, sizeof(ent->out->first.data), size); - int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) - / MLX5_CMD_DATA_BLOCK_SIZE; int i = 0; sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); @@ -1137,7 +1139,6 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *tmp, *head = NULL; struct mlx5_cmd_prot_block *block; struct mlx5_cmd_msg *msg; - int blen; int err; int n; int i; @@ -1146,8 +1147,8 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, if (!msg) return ERR_PTR(-ENOMEM); - blen = size - min_t(int, sizeof(msg->first.data), size); - n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; + msg->len = size; + n = mlx5_calc_cmd_blocks(msg); for (i = 0; i < n; i++) { tmp = alloc_cmd_box(dev, flags); @@ -1165,7 +1166,6 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, head = tmp; } msg->next = head; - msg->len = size; return msg; err_alloc: -- cgit v1.2.3 From 12b996d16b83414fbbdde37b661cfd0a622f40cf Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Sun, 8 Apr 2018 14:16:48 +0300 Subject: net/mlx5: Fix dump_command mailbox length printed Dump command mailbox length printed was correct only if data_only flag was set. For the case that data_only flag was clear the offset to stop printing at was wrong and so the buffer printed was too short. Changed the print loop to stop according to number of buffers in mailbox. 
Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 0f7062104ad9..487388aed98f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -722,9 +722,11 @@ static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); struct mlx5_cmd_mailbox *next = msg->next; + int n = mlx5_calc_cmd_blocks(msg); int data_only; u32 offset = 0; int dump_len; + int i; data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); @@ -751,7 +753,7 @@ static void dump_command(struct mlx5_core_dev *dev, offset += sizeof(*ent->lay); } - while (next && offset < msg->len) { + for (i = 0; i < n && next; i++) { if (data_only) { dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); dump_buf(next->buf, dump_len, 1, offset); -- cgit v1.2.3 From 6fa242af73244b6a23dc0aa58c5c410693e407e5 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 18 Feb 2018 10:35:25 +0200 Subject: net/mlx5: Cleanup unused field in Work Queue parameters Remove the 'linear' field from struct mlx5_wq_param. It is redundant, set but never read. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/wq.h | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b29c1d93f058..f60905648797 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1896,7 +1896,6 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); - param->wq.linear = 1; } static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index fca90b94596d..f3dfa0ca3c5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -38,7 +38,6 @@ #include struct mlx5_wq_param { - int linear; int buf_numa_node; int db_numa_node; }; -- cgit v1.2.3 From a8408f4e6db775e245f20edf12b13fd58cc03a1c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 3 May 2018 14:35:03 +0100 Subject: net/mlx5: fix spelling mistake: "modfiy" -> "modify" Trivial fix to spelling mistake in netdev_warn warning message Signed-off-by: Colin Ian King Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 610d485c4b03..f64b5e78519b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -565,7 +565,7 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv, err = mlx5_modify_rule_destination(rule, &dst, NULL); if (err) netdev_warn(priv->netdev, - "Failed to modfiy aRFS rule 
destination to rq=%d\n", rxq); + "Failed to modify aRFS rule destination to rq=%d\n", rxq); } static void arfs_handle_work(struct work_struct *work) -- cgit v1.2.3 From 630a4d3874a06aa9f9481cbfc688981aad7a834c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 3 May 2018 18:37:09 -0700 Subject: nfp: bpf: record offload neutral maps in the driver For asynchronous events originating from the device, like perf event output, we need to be able to make sure that objects being referred to by the FW message are valid on the host. FW events can get queued and reordered. Even if we had a FW message "barrier" we should still protect ourselves from bogus FW output. Add a reverse-mapping hash table and record in it all raw map pointers FW may refer to. Only record neutral maps, i.e. perf event arrays. These are currently the only objects FW can refer to. Use RCU protection on the read side, update side is under RTNL. Since program vs map destruction order is slightly painful for offload simply take an extra reference on all the recorded maps to make sure they don't disappear. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 25 ++++- drivers/net/ethernet/netronome/nfp/bpf/main.h | 20 +++- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 125 ++++++++++++++++++++++- drivers/net/ethernet/netronome/nfp/nfp_app.c | 2 +- kernel/bpf/syscall.c | 2 + 5 files changed, 168 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 1dc424685f4e..f65df341c557 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -43,6 +43,14 @@ #include "fw.h" #include "main.h" +const struct rhashtable_params nfp_bpf_maps_neutral_params = { + .nelem_hint = 4, + .key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr), + .key_offset = offsetof(struct nfp_bpf_neutral_map, ptr), + .head_offset = offsetof(struct nfp_bpf_neutral_map, l), + .automatic_shrinking = true, +}; + static bool nfp_net_ebpf_capable(struct nfp_net *nn) { #ifdef __LITTLE_ENDIAN @@ -401,17 +409,28 @@ static int nfp_bpf_init(struct nfp_app *app) init_waitqueue_head(&bpf->cmsg_wq); INIT_LIST_HEAD(&bpf->map_list); - err = nfp_bpf_parse_capabilities(app); + err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params); if (err) goto err_free_bpf; + err = nfp_bpf_parse_capabilities(app); + if (err) + goto err_free_neutral_maps; + return 0; +err_free_neutral_maps: + rhashtable_destroy(&bpf->maps_neutral); err_free_bpf: kfree(bpf); return err; } +static void nfp_check_rhashtable_empty(void *ptr, void *arg) +{ + WARN_ON_ONCE(1); +} + static void nfp_bpf_clean(struct nfp_app *app) { struct nfp_app_bpf *bpf = app->priv; @@ -419,6 +438,8 @@ static void nfp_bpf_clean(struct nfp_app *app) WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); WARN_ON(!list_empty(&bpf->map_list)); WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); + rhashtable_free_and_destroy(&bpf->maps_neutral, + nfp_check_rhashtable_empty, NULL); kfree(bpf); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 68b5d326483d..5db6284a44ba 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2017 Netronome Systems, Inc. + * Copyright (C) 2016-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -114,6 +115,8 @@ enum pkt_vec { * @maps_in_use: number of currently offloaded maps * @map_elems_in_use: number of elements allocated to offloaded maps * + * @maps_neutral: hash table of offload-neutral maps (on pointer) + * * @adjust_head: adjust head capability * @adjust_head.flags: extra flags for adjust head * @adjust_head.off_min: minimal packet offset within buffer required @@ -150,6 +153,8 @@ struct nfp_app_bpf { unsigned int maps_in_use; unsigned int map_elems_in_use; + struct rhashtable maps_neutral; + struct nfp_bpf_cap_adjust_head { u32 flags; int off_min; @@ -199,6 +204,14 @@ struct nfp_bpf_map { enum nfp_bpf_map_use use_map[]; }; +struct nfp_bpf_neutral_map { + struct rhash_head l; + struct bpf_map *ptr; + u32 count; +}; + +extern const struct rhashtable_params nfp_bpf_maps_neutral_params; + struct nfp_prog; struct nfp_insn_meta; typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); @@ -367,6 +380,8 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) * @error: error code if something went wrong * @stack_depth: max stack depth from the verifier * @adjust_head_location: if program has single adjust head call - the insn no. 
+ * @map_records_cnt: the number of map pointers recorded for this prog + * @map_records: the map record pointers from bpf->maps_neutral * @insns: list of BPF instruction wrappers (struct nfp_insn_meta) */ struct nfp_prog { @@ -390,6 +405,9 @@ struct nfp_prog { unsigned int stack_depth; unsigned int adjust_head_location; + unsigned int map_records_cnt; + struct nfp_bpf_neutral_map **map_records; + struct list_head insns; }; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 42d98792bd25..e3ead906cc60 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2017 Netronome Systems, Inc. + * Copyright (C) 2016-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -56,6 +56,126 @@ #include "../nfp_net_ctrl.h" #include "../nfp_net.h" +static int +nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, + struct bpf_map *map) +{ + struct nfp_bpf_neutral_map *record; + int err; + + /* Map record paths are entered via ndo, update side is protected. */ + ASSERT_RTNL(); + + /* Reuse path - other offloaded program is already tracking this map. */ + record = rhashtable_lookup_fast(&bpf->maps_neutral, &map, + nfp_bpf_maps_neutral_params); + if (record) { + nfp_prog->map_records[nfp_prog->map_records_cnt++] = record; + record->count++; + return 0; + } + + /* Grab a single ref to the map for our record. The prog destroy ndo + * happens after free_used_maps(). + */ + map = bpf_map_inc(map, false); + if (IS_ERR(map)) + return PTR_ERR(map); + + record = kmalloc(sizeof(*record), GFP_KERNEL); + if (!record) { + err = -ENOMEM; + goto err_map_put; + } + + record->ptr = map; + record->count = 1; + + err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l, + nfp_bpf_maps_neutral_params); + if (err) + goto err_free_rec; + + nfp_prog->map_records[nfp_prog->map_records_cnt++] = record; + + return 0; + +err_free_rec: + kfree(record); +err_map_put: + bpf_map_put(map); + return err; +} + +static void +nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog) +{ + bool freed = false; + int i; + + ASSERT_RTNL(); + + for (i = 0; i < nfp_prog->map_records_cnt; i++) { + if (--nfp_prog->map_records[i]->count) { + nfp_prog->map_records[i] = NULL; + continue; + } + + WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral, + &nfp_prog->map_records[i]->l, + nfp_bpf_maps_neutral_params)); + freed = true; + } + + if (freed) { + synchronize_rcu(); + + for (i = 0; i < nfp_prog->map_records_cnt; i++) + if (nfp_prog->map_records[i]) { + bpf_map_put(nfp_prog->map_records[i]->ptr); + kfree(nfp_prog->map_records[i]); + } + } + + kfree(nfp_prog->map_records); + nfp_prog->map_records = NULL; + nfp_prog->map_records_cnt = 0; +} + +static int +nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, + struct bpf_prog *prog) +{ + int i, cnt, err; + + /* Quickly count the maps we will have to remember */ + cnt = 0; + for (i = 0; i < prog->aux->used_map_cnt; i++) + if (bpf_map_offload_neutral(prog->aux->used_maps[i])) + cnt++; + if (!cnt) + return 0; + + nfp_prog->map_records = kmalloc_array(cnt, + sizeof(nfp_prog->map_records[0]), + GFP_KERNEL); + if (!nfp_prog->map_records) + return -ENOMEM; + + for (i = 0; i < prog->aux->used_map_cnt; i++) + if (bpf_map_offload_neutral(prog->aux->used_maps[i])) { + err = 
nfp_map_ptr_record(bpf, nfp_prog, + prog->aux->used_maps[i]); + if (err) { + nfp_map_ptrs_forget(bpf, nfp_prog); + return err; + } + } + WARN_ON(cnt != nfp_prog->map_records_cnt); + + return 0; +} + static int nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, unsigned int cnt) @@ -151,7 +271,7 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64); prog->aux->offload->jited_image = nfp_prog->prog; - return 0; + return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog); } static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) @@ -159,6 +279,7 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; kvfree(nfp_prog->prog); + nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog); nfp_prog_free(nfp_prog); return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 6aedef0ad433..0e0253c7e17b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 263e13ede029..9b87198deea2 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -282,6 +282,7 @@ void bpf_map_put(struct bpf_map *map) { __bpf_map_put(map, true); } +EXPORT_SYMBOL_GPL(bpf_map_put); void bpf_map_put_with_uref(struct bpf_map *map) { @@ -543,6 +544,7 @@ struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref) atomic_inc(&map->usercnt); return map; } +EXPORT_SYMBOL_GPL(bpf_map_inc); struct bpf_map *bpf_map_get_with_uref(u32 ufd) { -- cgit v1.2.3 From 9816dd35ececc095f3e3be29d30d3adc755908d9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 3 May 2018 18:37:12 -0700 Subject: nfp: bpf: perf event output helpers support Add support for the perf_event_output family of helpers. The implementation on the NFP will not match the host code exactly. The state of the host map and rings is unknown to the device, hence device can't return errors when rings are not installed. The device simply packs the data into a firmware notification message and sends it over to the host, returning success to the program. There is no notion of a host CPU on the device when packets are being processed. Device will only offload programs which set BPF_F_CURRENT_CPU. Still, if map index doesn't match CPU no error will be returned (see above). Dropped/lost firmware notification messages will not cause "lost events" event on the perf ring, they are only visible via device error counters. Firmware notification messages may also get reordered in respect to the packets which caused their generation. 
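A hedged sketch of the kind of program this path accepts is shown below; the map definition, section names, helper header and reported value are invented for illustration, and the one hard requirement taken from this description is that the flags argument of bpf_perf_event_output() is BPF_F_CURRENT_CPU.

/* Illustrative BPF program shape only, not part of the patch. */
#include <linux/bpf.h>
#include "bpf_helpers.h"	/* assumed helper header, e.g. from samples/bpf */

struct bpf_map_def SEC("maps") events = {
	.type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size    = sizeof(int),
	.value_size  = sizeof(__u32),
	.max_entries = 64,		/* illustrative */
};

SEC("xdp")
int report_len(struct xdp_md *ctx)
{
	__u64 len = (long)ctx->data_end - (long)ctx->data;

	/* index must be BPF_F_CURRENT_CPU or the offload verifier rejects it */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &len, sizeof(len));
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";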
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 16 +++++- drivers/net/ethernet/netronome/nfp/bpf/fw.h | 20 ++++++- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 32 ++++++++++- drivers/net/ethernet/netronome/nfp/bpf/main.c | 3 + drivers/net/ethernet/netronome/nfp/bpf/main.h | 4 ++ drivers/net/ethernet/netronome/nfp/bpf/offload.c | 47 +++++++++++++++ drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 69 ++++++++++++++++++++++- 7 files changed, 187 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 7e298148ca26..cb87fccb9f6a 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -102,6 +102,15 @@ nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n) return nfp_bpf_cmsg_alloc(bpf, size); } +static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb) +{ + struct cmsg_hdr *hdr; + + hdr = (struct cmsg_hdr *)skb->data; + + return hdr->type; +} + static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb) { struct cmsg_hdr *hdr; @@ -431,6 +440,11 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) goto err_free; } + if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) { + nfp_bpf_event_output(bpf, skb); + return; + } + nfp_ctrl_lock(bpf->app->ctrl); tag = nfp_bpf_cmsg_get_tag(skb); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index 39639ac28b01..3dbc21653ce5 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -37,6 +37,14 @@ #include #include +/* Kernel's enum bpf_reg_type is not uABI so people may change it breaking + * our FW ABI. In that case we will do translation in the driver. + */ +#define NFP_BPF_SCALAR_VALUE 1 +#define NFP_BPF_MAP_VALUE 4 +#define NFP_BPF_STACK 6 +#define NFP_BPF_PACKET_DATA 8 + enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_FUNC = 1, NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2, @@ -81,6 +89,7 @@ enum nfp_bpf_cmsg_type { CMSG_TYPE_MAP_DELETE = 5, CMSG_TYPE_MAP_GETNEXT = 6, CMSG_TYPE_MAP_GETFIRST = 7, + CMSG_TYPE_BPF_EVENT = 8, __CMSG_TYPE_MAP_MAX, }; @@ -155,4 +164,13 @@ struct cmsg_reply_map_op { __be32 resv; struct cmsg_key_value_pair elem[0]; }; + +struct cmsg_bpf_event { + struct cmsg_hdr hdr; + __be32 cpu_id; + __be64 map_ptr; + __be32 data_size; + __be32 pkt_size; + u8 data[0]; +}; #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 65f0791cae0c..2127cf1548dd 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2017 Netronome Systems, Inc. + * Copyright (C) 2016-2018 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -1456,6 +1456,31 @@ nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int +nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + swreg ptr_type; + u32 ret_tgt; + + ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog)); + + ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; + + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, + 2, RELO_BR_HELPER); + + /* Load ptr type into A1 */ + wrp_mov(nfp_prog, reg_a(1), ptr_type); + + /* Load the return address into B0 */ + wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); + + if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) + return -EINVAL; + + return 0; +} + /* --- Callbacks --- */ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { @@ -2411,6 +2436,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return map_call_stack_common(nfp_prog, meta); case BPF_FUNC_get_prandom_u32: return nfp_get_prandom_u32(nfp_prog, meta); + case BPF_FUNC_perf_event_output: + return nfp_perf_event_output(nfp_prog, meta); default: WARN_ONCE(1, "verifier allowed unsupported function\n"); return -EOPNOTSUPP; @@ -3353,6 +3380,9 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) case BPF_FUNC_map_delete_elem: val = nfp_prog->bpf->helpers.map_delete; break; + case BPF_FUNC_perf_event_output: + val = nfp_prog->bpf->helpers.perf_event_output; + break; default: pr_err("relocation of unknown helper %d\n", val); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index f65df341c557..d72f9e7f42da 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -298,6 +298,9 @@ nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) case BPF_FUNC_map_delete_elem: bpf->helpers.map_delete = readl(&cap->func_addr); break; + case BPF_FUNC_perf_event_output: + bpf->helpers.perf_event_output = readl(&cap->func_addr); + break; } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 5db6284a44ba..82682378d57f 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -136,6 +136,7 @@ enum pkt_vec { * @helpers.map_lookup: map lookup helper address * @helpers.map_update: map update helper address * @helpers.map_delete: map delete helper address + * @helpers.perf_event_output: output perf event to a ring buffer * * @pseudo_random: FW initialized the pseudo-random machinery (CSRs) */ @@ -176,6 +177,7 @@ struct nfp_app_bpf { u32 map_lookup; u32 map_update; u32 map_delete; + u32 perf_event_output; } helpers; bool pseudo_random; @@ -458,5 +460,7 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, void *key, void *next_key); +int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb); + void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb); #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index e3ead906cc60..4db0ac1e42a8 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ 
b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -441,6 +441,53 @@ int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf) } } +static unsigned long +nfp_bpf_perf_event_copy(void *dst, const void *src, + unsigned long off, unsigned long len) +{ + memcpy(dst, src + off, len); + return 0; +} + +int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb) +{ + struct cmsg_bpf_event *cbe = (void *)skb->data; + u32 pkt_size, data_size; + struct bpf_map *map; + + if (skb->len < sizeof(struct cmsg_bpf_event)) + goto err_drop; + + pkt_size = be32_to_cpu(cbe->pkt_size); + data_size = be32_to_cpu(cbe->data_size); + map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr); + + if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) + goto err_drop; + if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION) + goto err_drop; + + rcu_read_lock(); + if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map, + nfp_bpf_maps_neutral_params)) { + rcu_read_unlock(); + pr_warn("perf event: dest map pointer %px not recognized, dropping event\n", + map); + goto err_drop; + } + + bpf_event_output(map, be32_to_cpu(cbe->cpu_id), + &cbe->data[round_up(pkt_size, 4)], data_size, + cbe->data, pkt_size, nfp_bpf_perf_event_copy); + rcu_read_unlock(); + + dev_consume_skb_any(skb); + return 0; +err_drop: + dev_kfree_skb_any(skb); + return -EINVAL; +} + static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog, struct netlink_ext_ack *extack) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 06ad53ce4ad9..c76b622743ae 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2017 Netronome Systems, Inc. + * Copyright (C) 2016-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -36,6 +36,8 @@ #include #include +#include "../nfp_app.h" +#include "../nfp_main.h" #include "fw.h" #include "main.h" @@ -216,6 +218,71 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n"); return -EOPNOTSUPP; + case BPF_FUNC_perf_event_output: + BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE || + NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE || + NFP_BPF_STACK != PTR_TO_STACK || + NFP_BPF_PACKET_DATA != PTR_TO_PACKET); + + if (!bpf->helpers.perf_event_output) { + pr_vlog(env, "event_output: not supported by FW\n"); + return -EOPNOTSUPP; + } + + /* Force current CPU to make sure we can report the event + * wherever we get the control message from FW. + */ + if (reg3->var_off.mask & BPF_F_INDEX_MASK || + (reg3->var_off.value & BPF_F_INDEX_MASK) != + BPF_F_CURRENT_CPU) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off); + pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n", + tn_buf); + return -EOPNOTSUPP; + } + + /* Save space in meta, we don't care about arguments other + * than 4th meta, shove it into arg1. 
+ */ + reg1 = cur_regs(env) + BPF_REG_4; + + if (reg1->type != SCALAR_VALUE /* NULL ptr */ && + reg1->type != PTR_TO_STACK && + reg1->type != PTR_TO_MAP_VALUE && + reg1->type != PTR_TO_PACKET) { + pr_vlog(env, "event_output: unsupported ptr type: %d\n", + reg1->type); + return -EOPNOTSUPP; + } + + if (reg1->type == PTR_TO_STACK && + !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL)) + return -EOPNOTSUPP; + + /* Warn user that on offload NFP may return success even if map + * is not going to accept the event, since the event output is + * fully async and device won't know the state of the map. + * There is also FW limitation on the event length. + * + * Lost events will not show up on the perf ring, driver + * won't see them at all. Events may also get reordered. + */ + dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev, + "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n"); + pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n"); + + if (!meta->func_id) + break; + + if (reg1->type != meta->arg1.type) { + pr_vlog(env, "event_output: ptr type changed: %d %d\n", + meta->arg1.type, reg1->type); + return -EINVAL; + } + break; + default: pr_vlog(env, "unsupported function id: %d\n", func_id); return -EOPNOTSUPP; -- cgit v1.2.3 From b4264c96b5cbc00c4c07deb9fbab928d43dffcf9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 3 May 2018 18:37:13 -0700 Subject: nfp: bpf: rewrite map pointers with NFP TIDs Kernel will now replace map fds with actual pointer before calling the offload prepare. We can identify those pointers and replace them with NFP table IDs instead of loading the table ID in code generated for CALL instruction. This allows us to support having the same CALL being used with different maps. Since we don't want to change the FW ABI we still need to move the TID from R1 to portion of R0 before the jump. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Reviewed-by: Jiong Wang Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 44 ++++++++++++++++------- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 9 ----- 2 files changed, 32 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 2127cf1548dd..326a2085d650 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1395,15 +1395,9 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - struct bpf_offloaded_map *offmap; - struct nfp_bpf_map *nfp_map; bool load_lm_ptr; u32 ret_tgt; s64 lm_off; - swreg tid; - - offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr; - nfp_map = offmap->dev_priv; /* We only have to reload LM0 if the key is not at start of stack */ lm_off = nfp_prog->stack_depth; @@ -1416,17 +1410,12 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) if (meta->func_id == BPF_FUNC_map_update_elem) emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2); - /* Load map ID into a register, it should actually fit as an immediate - * but in case it doesn't deal with it here, not in the delay slots. 
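For context, a tiny stand-alone illustration (with arbitrary example values) of how a 64-bit BPF load-immediate spreads its operand across two instruction slots, matching the way the rewrite pass in the diff below patches the imm fields of both slots as a pair:

/* Sketch only: reconstructing the 64-bit value carried by a BPF_LD_IMM64 pair. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t imm_lo = 0x89abcdefu;	/* imm of the (BPF_LD | BPF_IMM | BPF_DW) insn */
	uint32_t imm_hi = 0x01234567u;	/* imm of the following pseudo insn */
	uint64_t value  = (uint64_t)imm_lo | ((uint64_t)imm_hi << 32);

	/* prints 0x0123456789abcdef */
	printf("0x%016llx\n", (unsigned long long)value);
	return 0;
}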
- */ - tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog)); - emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 2, RELO_BR_HELPER); ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; /* Load map ID into A0 */ - wrp_mov(nfp_prog, reg_a(0), tid); + wrp_mov(nfp_prog, reg_a(0), reg_a(2)); /* Load the return address into B0 */ wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); @@ -3254,6 +3243,33 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) return 0; } +static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta1, *meta2; + struct nfp_bpf_map *nfp_map; + struct bpf_map *map; + + nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { + if (meta1->skip || meta2->skip) + continue; + + if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) || + meta1->insn.src_reg != BPF_PSEUDO_MAP_FD) + continue; + + map = (void *)(unsigned long)((u32)meta1->insn.imm | + (u64)meta2->insn.imm << 32); + if (bpf_map_offload_neutral(map)) + continue; + nfp_map = map_to_offmap(map)->dev_priv; + + meta1->insn.imm = nfp_map->tid; + meta2->insn.imm = 0; + } + + return 0; +} + static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len) { __le64 *ustore = (__force __le64 *)prog; @@ -3290,6 +3306,10 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog) { int ret; + ret = nfp_bpf_replace_map_ptrs(nfp_prog); + if (ret) + return ret; + ret = nfp_bpf_optimize(nfp_prog); if (ret) return ret; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index c76b622743ae..e163f3cfa47d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -151,15 +151,6 @@ nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env, return false; } - /* Rest of the checks is only if we re-parse the same insn */ - if (!meta->func_id) - return true; - - if (meta->arg1.map_ptr != reg1->map_ptr) { - pr_vlog(env, "%s: called for different map\n", fname); - return false; - } - return true; } -- cgit v1.2.3 From 577b995be5f5235e7bfe48536c72a5d1f0084f6b Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Fri, 4 May 2018 17:17:47 +0200 Subject: net: 3com: 3c59x: Move boomerang/vortex conditional into function If vp->full_bus_master_tx is set, vp->full_bus_master_rx is set as well (see vortex_probe1()). Therefore the conditionals for the decision if boomerang or vortex ISR is executed have the same result. Instead of repeating the explicit conditional execution of the boomerang/vortex ISR, move it into an own function. No functional change. Cc: Steffen Klassert Signed-off-by: Anna-Maria Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/3com/3c59x.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 36c8950dbd2d..0cfdb07f3e59 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -765,8 +765,9 @@ static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev); static int vortex_rx(struct net_device *dev); static int boomerang_rx(struct net_device *dev); -static irqreturn_t vortex_interrupt(int irq, void *dev_id); -static irqreturn_t boomerang_interrupt(int irq, void *dev_id); +static irqreturn_t vortex_boomerang_interrupt(int irq, void *dev_id); +static irqreturn_t _vortex_interrupt(int irq, struct net_device *dev); +static irqreturn_t _boomerang_interrupt(int irq, struct net_device *dev); static int vortex_close(struct net_device *dev); static void dump_tx_ring(struct net_device *dev); static void update_stats(void __iomem *ioaddr, struct net_device *dev); @@ -838,10 +839,9 @@ MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)"); #ifdef CONFIG_NET_POLL_CONTROLLER static void poll_vortex(struct net_device *dev) { - struct vortex_private *vp = netdev_priv(dev); unsigned long flags; local_irq_save(flags); - (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); + vortex_boomerang_interrupt(dev->irq, dev); local_irq_restore(flags); } #endif @@ -1729,8 +1729,7 @@ vortex_open(struct net_device *dev) dma_addr_t dma; /* Use the now-standard shared IRQ implementation. */ - if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? - boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) { + if ((retval = request_irq(dev->irq, vortex_boomerang_interrupt, IRQF_SHARED, dev->name, dev))) { pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); goto err; } @@ -1911,10 +1910,7 @@ static void vortex_tx_timeout(struct net_device *dev) */ unsigned long flags; local_irq_save(flags); - if (vp->full_bus_master_tx) - boomerang_interrupt(dev->irq, dev); - else - vortex_interrupt(dev->irq, dev); + vortex_boomerang_interrupt(dev->irq, dev); local_irq_restore(flags); } } @@ -2267,9 +2263,8 @@ out_dma_err: */ static irqreturn_t -vortex_interrupt(int irq, void *dev_id) +_vortex_interrupt(int irq, struct net_device *dev) { - struct net_device *dev = dev_id; struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr; int status; @@ -2386,9 +2381,8 @@ handler_exit: */ static irqreturn_t -boomerang_interrupt(int irq, void *dev_id) +_boomerang_interrupt(int irq, struct net_device *dev) { - struct net_device *dev = dev_id; struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr; int status; @@ -2526,6 +2520,18 @@ handler_exit: return IRQ_RETVAL(handled); } +static irqreturn_t +vortex_boomerang_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct vortex_private *vp = netdev_priv(dev); + + if (vp->full_bus_master_rx) + return _boomerang_interrupt(dev->irq, dev); + else + return _vortex_interrupt(dev->irq, dev); +} + static int vortex_rx(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); -- cgit v1.2.3 From 47f66c766c8d91fe0a20b85359ce13e467863697 Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Fri, 4 May 2018 17:17:48 +0200 Subject: net: 3com: 3c59x: Pull locking out of ISR Locking is done in the same way in _vortex_interrupt() and _boomerang_interrupt(). 
To prevent duplication, move the locking into the calling vortex_boomerang_interrupt() function. No functional change. Cc: Steffen Klassert Signed-off-by: Anna-Maria Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/3c59x.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 0cfdb07f3e59..fdafe25da153 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2273,7 +2273,6 @@ _vortex_interrupt(int irq, struct net_device *dev) unsigned int bytes_compl = 0, pkts_compl = 0; ioaddr = vp->ioaddr; - spin_lock(&vp->lock); status = ioread16(ioaddr + EL3_STATUS); @@ -2371,7 +2370,6 @@ _vortex_interrupt(int irq, struct net_device *dev) pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status); handler_exit: - spin_unlock(&vp->lock); return IRQ_RETVAL(handled); } @@ -2392,12 +2390,6 @@ _boomerang_interrupt(int irq, struct net_device *dev) ioaddr = vp->ioaddr; - - /* - * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout - * and boomerang_start_xmit - */ - spin_lock(&vp->lock); vp->handling_irq = 1; status = ioread16(ioaddr + EL3_STATUS); @@ -2516,7 +2508,6 @@ _boomerang_interrupt(int irq, struct net_device *dev) dev->name, status); handler_exit: vp->handling_irq = 0; - spin_unlock(&vp->lock); return IRQ_RETVAL(handled); } @@ -2525,11 +2516,18 @@ vortex_boomerang_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct vortex_private *vp = netdev_priv(dev); + irqreturn_t ret; + + spin_lock(&vp->lock); if (vp->full_bus_master_rx) - return _boomerang_interrupt(dev->irq, dev); + ret = _boomerang_interrupt(dev->irq, dev); else - return _vortex_interrupt(dev->irq, dev); + ret = _vortex_interrupt(dev->irq, dev); + + spin_unlock(&vp->lock); + + return ret; } static int vortex_rx(struct net_device *dev) -- cgit v1.2.3 From dea4c96e65b3c5520f1e9d9e038e79d59f5688f1 Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Fri, 4 May 2018 17:17:49 +0200 Subject: net: 3com: 3c59x: irq save variant of ISR When vortex_boomerang_interrupt() is invoked from vortex_tx_timeout() or poll_vortex() interrupts must be disabled. This detaches the interrupt disable logic from locking which requires patching for PREEMPT_RT. The advantage of avoiding spin_lock_irqsave() in the interrupt handler is minimal, but converting it removes all the extra code for callers which come not from interrupt context. Cc: Steffen Klassert Signed-off-by: Anna-Maria Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/3com/3c59x.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index fdafe25da153..cabbe227bb98 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -839,10 +839,7 @@ MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)"); #ifdef CONFIG_NET_POLL_CONTROLLER static void poll_vortex(struct net_device *dev) { - unsigned long flags; - local_irq_save(flags); vortex_boomerang_interrupt(dev->irq, dev); - local_irq_restore(flags); } #endif @@ -1904,15 +1901,7 @@ static void vortex_tx_timeout(struct net_device *dev) pr_err("%s: Interrupt posted but not delivered --" " IRQ blocked by another device?\n", dev->name); /* Bad idea here.. but we might as well handle a few events. */ - { - /* - * Block interrupts because vortex_interrupt does a bare spin_lock() - */ - unsigned long flags; - local_irq_save(flags); - vortex_boomerang_interrupt(dev->irq, dev); - local_irq_restore(flags); - } + vortex_boomerang_interrupt(dev->irq, dev); } if (vortex_debug > 0) @@ -2516,16 +2505,17 @@ vortex_boomerang_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; irqreturn_t ret; - spin_lock(&vp->lock); + spin_lock_irqsave(&vp->lock, flags); if (vp->full_bus_master_rx) ret = _boomerang_interrupt(dev->irq, dev); else ret = _vortex_interrupt(dev->irq, dev); - spin_unlock(&vp->lock); + spin_unlock_irqrestore(&vp->lock, flags); return ret; } -- cgit v1.2.3 From 18b338f5f9539512e76fd9ebd4c6ca1a0e159e2b Mon Sep 17 00:00:00 2001 From: Weilin Chang Date: Fri, 4 May 2018 11:07:19 -0700 Subject: liquidio: support use of ethtool to set link speed of CN23XX-225 cards Support setting the link speed of CN23XX-225 cards (which can do 25Gbps or 10Gbps) via ethtool_ops.set_link_ksettings. Also fix the function assigned to ethtool_ops.get_link_ksettings to use the new link_ksettings api completely (instead of partially via ethtool_convert_legacy_u32_to_link_mode). Signed-off-by: Weilin Chang Acked-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 196 +++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 195 +++++++++++++++++--- drivers/net/ethernet/cavium/liquidio/lio_main.c | 20 +++ drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 5 + .../net/ethernet/cavium/liquidio/liquidio_common.h | 4 + .../net/ethernet/cavium/liquidio/octeon_device.h | 14 ++ .../net/ethernet/cavium/liquidio/octeon_network.h | 15 ++ 7 files changed, 425 insertions(+), 24 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 6821afcdc365..8093c5eafea2 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -1481,3 +1481,199 @@ int octnet_get_link_stats(struct net_device *netdev) return 0; } + +static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct, + u32 status, + void *buf) +{ + struct liquidio_nic_seapi_ctl_context *ctx; + struct octeon_soft_command *sc = buf; + + ctx = sc->ctxptr; + + oct = lio_get_device(ctx->octeon_id); + if (status) { + dev_err(&oct->pci_dev->dev, "%s: instruction failed. 
Status: %llx\n", + __func__, + CVM_CAST64(status)); + } + ctx->status = status; + complete(&ctx->complete); +} + +int liquidio_set_speed(struct lio *lio, int speed) +{ + struct liquidio_nic_seapi_ctl_context *ctx; + struct octeon_device *oct = lio->oct_dev; + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + u32 ctx_size; + int retval; + u32 var; + + if (oct->speed_setting == speed) + return 0; + + if (!OCTEON_CN23XX_PF(oct)) { + dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n", + __func__); + return -EOPNOTSUPP; + } + + ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), + ctx_size); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + ctx = sc->ctxptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + ctx->octeon_id = lio_get_device_id(oct); + ctx->status = 0; + init_completion(&ctx->complete); + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_SPEED_SET; + ncmd->s.param1 = speed; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + sc->callback = liquidio_nic_seapi_ctl_callback; + sc->callback_arg = sc; + sc->wait_time = 5000; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + retval = -EBUSY; + } else { + /* Wait for response or timeout */ + if (wait_for_completion_timeout(&ctx->complete, + msecs_to_jiffies(10000)) == 0) { + dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", + __func__); + octeon_free_soft_command(oct, sc); + return -EINTR; + } + + retval = resp->status; + + if (retval) { + dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n", + __func__, retval); + octeon_free_soft_command(oct, sc); + return -EIO; + } + + var = be32_to_cpu((__force __be32)resp->speed); + if (var != speed) { + dev_err(&oct->pci_dev->dev, + "%s: setting failed speed= %x, expect %x\n", + __func__, var, speed); + } + + oct->speed_setting = var; + } + + octeon_free_soft_command(oct, sc); + + return retval; +} + +int liquidio_get_speed(struct lio *lio) +{ + struct liquidio_nic_seapi_ctl_context *ctx; + struct octeon_device *oct = lio->oct_dev; + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + u32 ctx_size; + int retval; + + ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), + ctx_size); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + ctx = sc->ctxptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + ctx->octeon_id = lio_get_device_id(oct); + ctx->status = 0; + init_completion(&ctx->complete); + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_SPEED_GET; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + sc->callback = liquidio_nic_seapi_ctl_callback; + sc->callback_arg = sc; + sc->wait_time = 5000; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + oct->no_speed_setting = 1; + oct->speed_setting = 25; + + retval = -EBUSY; + } else { + if 
(wait_for_completion_timeout(&ctx->complete, + msecs_to_jiffies(10000)) == 0) { + dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", + __func__); + + oct->speed_setting = 25; + oct->no_speed_setting = 1; + + octeon_free_soft_command(oct, sc); + + return -EINTR; + } + retval = resp->status; + if (retval) { + dev_err(&oct->pci_dev->dev, + "%s failed retval=%d\n", __func__, retval); + oct->no_speed_setting = 1; + oct->speed_setting = 25; + octeon_free_soft_command(oct, sc); + retval = -EIO; + } else { + u32 var; + + var = be32_to_cpu((__force __be32)resp->speed); + oct->speed_setting = var; + if (var == 0xffff) { + oct->no_speed_setting = 1; + /* unable to access boot variables + * get the default value based on the NIC type + */ + oct->speed_setting = 25; + } + } + } + + octeon_free_soft_command(oct, sc); + + return retval; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index a1d84304a608..06f7449c569d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -230,46 +230,147 @@ static int lio_get_link_ksettings(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct oct_link_info *linfo; - u32 supported = 0, advertising = 0; linfo = &lio->linfo; + ethtool_link_ksettings_zero_link_mode(ecmd, supported); + ethtool_link_ksettings_zero_link_mode(ecmd, advertising); + switch (linfo->link.s.phy_type) { case LIO_PHY_PORT_TP: ecmd->base.port = PORT_TP; - supported = (SUPPORTED_10000baseT_Full | - SUPPORTED_TP | SUPPORTED_Pause); - advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); ecmd->base.autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); + ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(ecmd, supported, + 10000baseT_Full); + + ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + 10000baseT_Full); + break; case LIO_PHY_PORT_FIBRE: - ecmd->base.port = PORT_FIBRE; - - if (linfo->link.s.speed == SPEED_10000) { - supported = SUPPORTED_10000baseT_Full; - advertising = ADVERTISED_10000baseT_Full; + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { + dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); + } else { + dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", + linfo->link.s.if_mode); } - supported |= SUPPORTED_FIBRE | SUPPORTED_Pause; - advertising |= ADVERTISED_Pause; + ecmd->base.port = PORT_FIBRE; ecmd->base.autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); + + ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (OCTEON_CN23XX_PF(oct)) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 25000baseCR_Full); + + if (oct->no_speed_setting == 0) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseSR_Full); + 
ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseCR_Full); + } + + if (oct->no_speed_setting == 0) + liquidio_get_speed(lio); + else + oct->speed_setting = 25; + + if (oct->speed_setting == 10) { + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseCR_Full); + } + if (oct->speed_setting == 25) { + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseCR_Full); + } + } else { /* VF */ + if (linfo->link.s.speed == 10000) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseCR_Full); + + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseCR_Full); + } + + if (linfo->link.s.speed == 25000) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 25000baseCR_Full); + + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseCR_Full); + } + } + } else { + ethtool_link_ksettings_add_link_mode(ecmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + 10000baseT_Full); + } break; } - if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || - linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || - linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || - linfo->link.s.if_mode == INTERFACE_MODE_XFI) { - ethtool_convert_legacy_u32_to_link_mode( - ecmd->link_modes.supported, supported); - ethtool_convert_legacy_u32_to_link_mode( - ecmd->link_modes.advertising, advertising); - } else { - dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", - linfo->link.s.if_mode); - } - if (linfo->link.s.link_up) { ecmd->base.speed = linfo->link.s.speed; ecmd->base.duplex = linfo->link.s.duplex; @@ -281,6 +382,51 @@ static int lio_get_link_ksettings(struct net_device *netdev, return 0; } +static int lio_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ecmd) +{ + const int speed = ecmd->base.speed; + struct lio *lio = GET_LIO(netdev); + struct oct_link_info *linfo; + struct octeon_device *oct; + u32 is25G = 0; + + oct = lio->oct_dev; + + linfo = &lio->linfo; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + is25G = 1; + } else { + return -EOPNOTSUPP; + } + + if (oct->no_speed_setting) { + dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n", + __func__); + return -EOPNOTSUPP; + } + + if ((ecmd->base.duplex != DUPLEX_UNKNOWN && + ecmd->base.duplex != linfo->link.s.duplex) || + ecmd->base.autoneg != AUTONEG_DISABLE || + 
(ecmd->base.speed != 10000 && ecmd->base.speed != 25000 && + ecmd->base.speed != SPEED_UNKNOWN)) + return -EOPNOTSUPP; + + if ((oct->speed_boot == speed / 1000) && + oct->speed_boot == oct->speed_setting) + return 0; + + liquidio_set_speed(lio, speed / 1000); + + dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n", + oct->speed_setting); + + return 0; +} + static void lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { @@ -2966,6 +3112,7 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags) static const struct ethtool_ops lio_ethtool_ops = { .get_link_ksettings = lio_get_link_ksettings, + .set_link_ksettings = lio_set_link_ksettings, .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index ee75048b3937..e500528ad751 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -912,6 +912,9 @@ liquidio_probe(struct pci_dev *pdev, /* set linux specific device pointer */ oct_dev->pci_dev = (void *)pdev; + oct_dev->subsystem_id = pdev->subsystem_vendor | + (pdev->subsystem_device << 16); + hs = &handshake[oct_dev->octeon_id]; init_completion(&hs->init); init_completion(&hs->started); @@ -3664,6 +3667,23 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "NIC ifidx:%d Setup successful\n", i); octeon_free_soft_command(octeon_dev, sc); + + if (octeon_dev->subsystem_id == + OCTEON_CN2350_25GB_SUBSYS_ID || + octeon_dev->subsystem_id == + OCTEON_CN2360_25GB_SUBSYS_ID) { + liquidio_get_speed(lio); + + if (octeon_dev->speed_setting == 0) { + octeon_dev->speed_setting = 25; + octeon_dev->no_speed_setting = 1; + } + } else { + octeon_dev->no_speed_setting = 1; + octeon_dev->speed_setting = 10; + } + octeon_dev->speed_boot = octeon_dev->speed_setting; + } devlink = devlink_alloc(&liquidio_devlink_ops, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 6295eeec795c..7fa0212873ac 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -411,6 +411,9 @@ liquidio_vf_probe(struct pci_dev *pdev, /* set linux specific device pointer */ oct_dev->pci_dev = pdev; + oct_dev->subsystem_id = pdev->subsystem_vendor | + (pdev->subsystem_device << 16); + if (octeon_device_init(oct_dev)) { liquidio_vf_remove(pdev); return -ENOMEM; @@ -2199,6 +2202,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "NIC ifidx:%d Setup successful\n", i); octeon_free_soft_command(octeon_dev, sc); + + octeon_dev->no_speed_setting = 1; } return 0; diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 026570499dcf..285b24836974 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -93,6 +93,7 @@ enum octeon_tag_type { #define OPCODE_NIC_VF_REP_PKT 0x15 #define OPCODE_NIC_VF_REP_CMD 0x16 +#define OPCODE_NIC_UBOOT_CTL 0x17 #define CORE_DRV_TEST_SCATTER_OP 0xFFF5 @@ -249,6 +250,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1 #define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0 +#define SEAPI_CMD_SPEED_SET 0x2 +#define SEAPI_CMD_SPEED_GET 0x3 + #define LIO_CMD_WAIT_TM 100 /* RX(packets coming from wire) 
Checksum verification flags */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 9430c0a0bb8c..94a4ed88d618 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -43,6 +43,13 @@ #define OCTEON_CN23XX_REV_1_1 0x01 #define OCTEON_CN23XX_REV_2_0 0x80 +/**SubsystemId for the chips */ +#define OCTEON_CN2350_10GB_SUBSYS_ID_1 0X3177d +#define OCTEON_CN2350_10GB_SUBSYS_ID_2 0X4177d +#define OCTEON_CN2360_10GB_SUBSYS_ID 0X5177d +#define OCTEON_CN2350_25GB_SUBSYS_ID 0X7177d +#define OCTEON_CN2360_25GB_SUBSYS_ID 0X6177d + /** Endian-swap modes supported by Octeon. */ enum octeon_pci_swap_mode { OCTEON_PCI_PASSTHROUGH = 0, @@ -430,6 +437,8 @@ struct octeon_device { u16 rev_id; + u32 subsystem_id; + u16 pf_num; u16 vf_num; @@ -584,6 +593,11 @@ struct octeon_device { struct lio_vf_rep_list vf_rep_list; struct devlink *devlink; enum devlink_eswitch_mode eswitch_mode; + + /* for 25G NIC speed change */ + u8 speed_boot; + u8 speed_setting; + u8 no_speed_setting; }; #define OCT_DRV_ONLINE 1 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 8571f11e3c8f..dd3177a526d2 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -81,6 +81,18 @@ struct oct_nic_stats_ctrl { struct net_device *netdev; }; +struct oct_nic_seapi_resp { + u64 rh; + u32 speed; + u64 status; +}; + +struct liquidio_nic_seapi_ctl_context { + int octeon_id; + u32 status; + struct completion complete; +}; + /** LiquidIO per-interface network private data */ struct lio { /** State of the interface. Rx/Tx happens only in the RUNNING state. */ @@ -230,6 +242,9 @@ void lio_delete_glists(struct lio *lio); int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs); +int liquidio_get_speed(struct lio *lio); +int liquidio_set_speed(struct lio *lio, int speed); + /** * \brief Net device change_mtu * @param netdev network device -- cgit v1.2.3 From 0bc5fe857274133ca028ebb15ff2e8549a369916 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 5 May 2018 18:42:59 -0700 Subject: qed*: Refactor mf_mode to consist of bits. `mf_mode' field indicates the multi-partitioning mode the device is configured to. This method doesn't scale very well, adding a new MF mode requires going over all the existing conditions, and deciding whether those are needed for the new mode or not. The patch defines a set of bit-fields for modes which are derived according to the mode info shared by the MFW and all the configuration would be made according to those. To add a new mode, there would be a single place where we'll need to go and choose which bits apply and which don't. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 41 ++++++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_dev.c | 39 +++++++++++---------- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 6 ++-- drivers/net/ethernet/qlogic/qed/qed_main.c | 6 ++-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 3 +- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 16 +++------ drivers/net/ethernet/qlogic/qede/qede_main.c | 4 +-- include/linux/qed/qed_if.h | 2 +- 8 files changed, 71 insertions(+), 46 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index e07460a68d30..c8f3507d1151 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -439,6 +439,41 @@ struct qed_fw_data { u32 init_ops_size; }; +enum qed_mf_mode_bit { + /* Supports PF-classification based on tag */ + QED_MF_OVLAN_CLSS, + + /* Supports PF-classification based on MAC */ + QED_MF_LLH_MAC_CLSS, + + /* Supports PF-classification based on protocol type */ + QED_MF_LLH_PROTO_CLSS, + + /* Requires a default PF to be set */ + QED_MF_NEED_DEF_PF, + + /* Allow LL2 to multicast/broadcast */ + QED_MF_LL2_NON_UNICAST, + + /* Allow Cross-PF [& child VFs] Tx-switching */ + QED_MF_INTER_PF_SWITCH, + + /* Unified Fabtic Port support enabled */ + QED_MF_UFP_SPECIFIC, + + /* Disable Accelerated Receive Flow Steering (aRFS) */ + QED_MF_DISABLE_ARFS, + + /* Use vlan for steering */ + QED_MF_8021Q_TAGGING, + + /* Use stag for steering */ + QED_MF_8021AD_TAGGING, + + /* Allow DSCP to TC mapping */ + QED_MF_DSCP_TO_TC_MAP, +}; + enum BAR_ID { BAR_ID_0, /* used for GRC */ BAR_ID_1 /* Used for doorbells */ @@ -669,10 +704,8 @@ struct qed_dev { u8 num_funcs_in_port; u8 path_id; - enum qed_mf_mode mf_mode; -#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT) -#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR) -#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN) + + unsigned long mf_bits; int pcie_width; int pcie_speed; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d2ad5e92c74f..9b07d7f25042 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1149,18 +1149,10 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) return -EINVAL; } - switch (p_hwfn->cdev->mf_mode) { - case QED_MF_DEFAULT: - case QED_MF_NPAR: - hw_mode |= 1 << MODE_MF_SI; - break; - case QED_MF_OVLAN: + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) hw_mode |= 1 << MODE_MF_SD; - break; - default: - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + else hw_mode |= 1 << MODE_MF_SI; - } hw_mode |= 1 << MODE_ASIC; @@ -1557,7 +1549,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* send function start command */ rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, - p_hwfn->cdev->mf_mode, allow_npar_tx_switch); if (rc) { DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); @@ -2651,17 +2642,25 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) switch (mf_mode) { case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: - p_hwfn->cdev->mf_mode = QED_MF_OVLAN; + p_hwfn->cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: - p_hwfn->cdev->mf_mode = QED_MF_NPAR; + p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_LL2_NON_UNICAST) | + BIT(QED_MF_INTER_PF_SWITCH); break; case NVM_CFG1_GLOB_MF_MODE_DEFAULT: - 
p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; + p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_LL2_NON_UNICAST); + if (QED_IS_BB(p_hwfn->cdev)) + p_hwfn->cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); break; } - DP_INFO(p_hwfn, "Multi function mode is %08x\n", - p_hwfn->cdev->mf_mode); + + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", + p_hwfn->cdev->mf_bits); /* Read Multi-function information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + @@ -3462,7 +3461,7 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0, en; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits)) return 0; qed_llh_mac_to_filter(&high, &low, p_filter); @@ -3507,7 +3506,7 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits)) return; qed_llh_mac_to_filter(&high, &low, p_filter); @@ -3549,7 +3548,7 @@ qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0, en; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits)) return 0; switch (type) { @@ -3647,7 +3646,7 @@ qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits)) return; switch (type) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 38502815d681..6c942c133b2c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -922,9 +922,9 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 
1 : 0; - if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && - p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) && - (conn_type != QED_LL2_TYPE_IWARP)) { + if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) && + p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE && + conn_type != QED_LL2_TYPE_IWARP) { p_ramrod->mf_si_bcast_accept_all = 1; p_ramrod->mf_si_mcast_accept_all = 1; } else { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d1d3787affe8..307fe33d75c8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -264,7 +264,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); - dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); + dev_info->is_mf_default = !test_bit(QED_MF_LLH_MAC_CLSS, + &cdev->mf_bits); dev_info->dev_type = cdev->type; ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); @@ -273,7 +274,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->fw_minor = FW_MINOR_VERSION; dev_info->fw_rev = FW_REVISION_VERSION; dev_info->fw_eng = FW_ENGINEERING_VERSION; - dev_info->mf_mode = cdev->mf_mode; + dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, + &cdev->mf_bits); dev_info->tx_switching = true; if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index ab4ad8a1e2a5..768022235451 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -416,7 +416,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param p_ptt * @param p_tunn - * @param mode * @param allow_npar_tx_switch * * @return int @@ -425,7 +424,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, - enum qed_mf_mode mode, bool allow_npar_tx_switch); + bool allow_npar_tx_switch); /** * @brief qed_sp_pf_update - PF Function Update Ramrod diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 5e927b6cac22..fbb317257629 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -306,7 +306,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, - enum qed_mf_mode mode, bool allow_npar_tx_switch) + bool allow_npar_tx_switch) { struct pf_start_ramrod_data *p_ramrod = NULL; u16 sb = qed_int_get_sp_sb_id(p_hwfn); @@ -339,18 +339,10 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->dont_log_ramrods = 0; p_ramrod->log_type_mask = cpu_to_le16(0xf); - switch (mode) { - case QED_MF_DEFAULT: - case QED_MF_NPAR: - p_ramrod->mf_mode = MF_NPAR; - break; - case QED_MF_OVLAN: + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) p_ramrod->mf_mode = MF_OVLAN; - break; - default: - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + else p_ramrod->mf_mode = MF_NPAR; - } p_ramrod->outer_tag_config.outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan); @@ -365,7 +357,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); - if (IS_MF_SI(p_hwfn)) + if 
(test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits)) p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; switch (p_hwfn->hw_info.personality) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a01e7d6e5442..89c581c3c21a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -199,7 +199,7 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) /* Enable/Disable Tx switching for PF */ if ((rc == num_vfs_param) && netif_running(edev->ndev) && - qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { + !qed_info->b_inter_pf_switch && qed_info->tx_switching) { vport_params->vport_id = 0; vport_params->update_tx_switching_flg = 1; vport_params->tx_switching_flg = num_vfs_param ? 1 : 0; @@ -1928,7 +1928,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) vport_update_params->update_vport_active_flg = 1; vport_update_params->vport_active_flg = 1; - if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && + if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) && qed_info->tx_switching) { vport_update_params->update_tx_switching_flg = 1; vport_update_params->tx_switching_flg = 1; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index e53f9c7c2809..5dac56147a07 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -359,7 +359,7 @@ struct qed_dev_info { #define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; - u8 mf_mode; + bool b_inter_pf_switch; bool tx_switching; bool rdma_supported; u16 mtu; -- cgit v1.2.3 From 27bf96e32c92599dc7523b36d6c761fc8312c8c0 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 5 May 2018 18:43:00 -0700 Subject: qed: Remove unused data member 'is_mf_default'. The data member 'is_mf_default' is not used by the qed/qede drivers, removing the same. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 -- include/linux/qed/qed_if.h | 1 - 2 files changed, 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 307fe33d75c8..70bc5634675c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -264,8 +264,6 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); - dev_info->is_mf_default = !test_bit(QED_MF_LLH_MAC_CLSS, - &cdev->mf_bits); dev_info->dev_type = cdev->type; ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 5dac56147a07..907976fd56f7 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -339,7 +339,6 @@ struct qed_dev_info { u8 num_hwfns; u8 hw_mac[ETH_ALEN]; - bool is_mf_default; /* FW version */ u16 fw_major; -- cgit v1.2.3 From b51bdfb9cbe2ecf99a4c45c48c6286963344786c Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 5 May 2018 18:43:01 -0700 Subject: qed: Add support for multi function mode with 802.1ad tagging. The patch adds support for new Multi function mode wherein the traffic classification is done based on the 802.1ad tagging and the outer vlan tag provided by the management firmware. 
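For reference, the classification added here hinges on the outer-tag TPID: a plain 802.1Q C-tag uses ethertype 0x8100, while an 802.1ad S-tag uses 0x88A8. A minimal, hypothetical sketch using only the standard constants from <linux/if_ether.h> (this is not the driver code; the helper and its flag are invented for illustration):

    #include <linux/if_ether.h>
    #include <linux/types.h>

    /* Illustrative helper only: pick the outer-tag ethertype for the
     * partition. ETH_P_8021AD is 0x88A8, ETH_P_8021Q is 0x8100.
     */
    static u16 outer_tag_tpid(bool use_8021ad)
    {
    	return use_8021ad ? ETH_P_8021AD : ETH_P_8021Q;
    }

The patch below programs ETH_P_8021AD into the PRS/NIG/PBF/DORQ tag-ethertype registers and into the PF start ramrod's outer tag TPID when the new 802.1ad mode bit is set.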
Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 64 ++++++++++++++++------- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 5 ++ 2 files changed, 49 insertions(+), 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 9b07d7f25042..95d00cbe1a7f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1668,6 +1668,18 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) if (rc) return rc; + if (IS_PF(cdev) && test_bit(QED_MF_8021AD_TAGGING, + &cdev->mf_bits)) { + STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ETH_P_8021AD); + STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ETH_P_8021AD); + STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ETH_P_8021AD); + STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, + ETH_P_8021AD); + } + qed_fill_load_req_params(&load_req_params, p_params->p_drv_load_params); rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, @@ -2630,39 +2642,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) link->pause.autoneg, p_caps->default_eee, p_caps->eee_lpi_timer); - /* Read Multi-function information from shmem */ - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + - offsetof(struct nvm_cfg1, glob) + - offsetof(struct nvm_cfg1_glob, generic_cont0); + if (IS_LEAD_HWFN(p_hwfn)) { + struct qed_dev *cdev = p_hwfn->cdev; - generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, generic_cont0); - mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> - NVM_CFG1_GLOB_MF_MODE_OFFSET; + generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); - switch (mf_mode) { - case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: - p_hwfn->cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); - break; - case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: - p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> + NVM_CFG1_GLOB_MF_MODE_OFFSET; + + switch (mf_mode) { + case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); + break; + case NVM_CFG1_GLOB_MF_MODE_BD: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_8021AD_TAGGING); + break; + case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_LL2_NON_UNICAST) | BIT(QED_MF_INTER_PF_SWITCH); - break; - case NVM_CFG1_GLOB_MF_MODE_DEFAULT: - p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + break; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_LL2_NON_UNICAST); - if (QED_IS_BB(p_hwfn->cdev)) - p_hwfn->cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); - break; + if (QED_IS_BB(p_hwfn->cdev)) + cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); + break; + } + + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", + cdev->mf_bits); } DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", p_hwfn->cdev->mf_bits); - /* Read Multi-function information from shmem */ + /* Read device capabilities information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, device_capabilities); diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index fbb317257629..26bed26b9071 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -346,6 +346,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->outer_tag_config.outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + } + /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, -- cgit v1.2.3 From cac6f691546b9efd50c31c0db97fe50d0357104a Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 5 May 2018 18:43:02 -0700 Subject: qed: Add support for Unified Fabric Port. This patch adds driver changes for supporting the Unified Fabric Port (UFP). This is a new paritioning mode wherein MFW provides the set of parameters to be used by the device such as traffic class, outer-vlan tag value, priority type etc. Drivers receives this info via notifications from mfw and configures the hardware accordingly. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 20 ++++++ drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 14 +++- drivers/net/ethernet/qlogic/qed/qed_dev.c | 32 ++++++++-- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 3 + drivers/net/ethernet/qlogic/qed/qed_hsi.h | 28 ++++++++ drivers/net/ethernet/qlogic/qed/qed_ll2.c | 40 ++++++++---- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 78 +++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 8 +++ drivers/net/ethernet/qlogic/qed/qed_sp.h | 9 +++ drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 57 ++++++++++++++++- drivers/scsi/qedf/qedf_fip.c | 7 +- drivers/scsi/qedf/qedf_main.c | 2 +- drivers/scsi/qedi/qedi_iscsi.c | 2 +- include/linux/qed/qed_ll2_if.h | 10 ++- 14 files changed, 283 insertions(+), 27 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index c8f3507d1151..adcff495466e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -474,6 +474,24 @@ enum qed_mf_mode_bit { QED_MF_DSCP_TO_TC_MAP, }; +enum qed_ufp_mode { + QED_UFP_MODE_ETS, + QED_UFP_MODE_VNIC_BW, + QED_UFP_MODE_UNKNOWN +}; + +enum qed_ufp_pri_type { + QED_UFP_PRI_OS, + QED_UFP_PRI_VNIC, + QED_UFP_PRI_UNKNOWN +}; + +struct qed_ufp_info { + enum qed_ufp_pri_type pri_type; + enum qed_ufp_mode mode; + u8 tc; +}; + enum BAR_ID { BAR_ID_0, /* used for GRC */ BAR_ID_1 /* Used for doorbells */ @@ -582,6 +600,8 @@ struct qed_hwfn { struct qed_dcbx_info *p_dcbx_info; + struct qed_ufp_info ufp_info; + struct qed_dmae_info dmae_info; /* QM init */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 449777f21237..8f31406ec894 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -274,8 +274,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, u32 pri_tc_tbl, int count, u8 dcbx_version) { enum dcbx_protocol_type type; + bool enable, ieee, eth_tlv; u8 tc, priority_map; - bool enable, ieee; u16 protocol_id; int priority; int i; @@ -283,6 +283,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); ieee = (dcbx_version 
== DCBX_CONFIG_VERSION_IEEE); + eth_tlv = false; /* Parse APP TLV */ for (i = 0; i < count; i++) { protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, @@ -304,13 +305,22 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, * indication, but we only got here if there was an * app tlv for the protocol, so dcbx must be enabled. */ - enable = !(type == DCBX_PROTOCOL_ETH); + if (type == DCBX_PROTOCOL_ETH) { + enable = false; + eth_tlv = true; + } else { + enable = true; + } qed_dcbx_update_app_info(p_data, p_hwfn, enable, priority, tc, type); } } + /* If Eth TLV is not detected, use UFP TC as default TC */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv) + p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc; + /* Update ramrod protocol data and hw_info fields * with default info when corresponding APP TLV's are not detected. * The enabled field has a different logic for ethernet as only for diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 95d00cbe1a7f..560528962658 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1499,6 +1499,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, p_hwfn->hw_info.ovlan); + + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, + 1); } /* Enable classification by MAC if needed */ @@ -1635,6 +1640,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) bool b_default_mtu = true; struct qed_hwfn *p_hwfn; int rc = 0, mfw_rc, i; + u16 ether_type; if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); @@ -1668,16 +1674,22 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) if (rc) return rc; - if (IS_PF(cdev) && test_bit(QED_MF_8021AD_TAGGING, - &cdev->mf_bits)) { + if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING, + &cdev->mf_bits) || + test_bit(QED_MF_8021AD_TAGGING, + &cdev->mf_bits))) { + if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits)) + ether_type = ETH_P_8021Q; + else + ether_type = ETH_P_8021AD; STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, - ETH_P_8021AD); + ether_type); STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, - ETH_P_8021AD); + ether_type); STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, - ETH_P_8021AD); + ether_type); STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, - ETH_P_8021AD); + ether_type); } qed_fill_load_req_params(&load_req_params, @@ -2659,6 +2671,12 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); break; + case NVM_CFG1_GLOB_MF_MODE_UFP: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_UFP_SPECIFIC) | + BIT(QED_MF_8021Q_TAGGING); + break; case NVM_CFG1_GLOB_MF_MODE_BD: cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | @@ -2879,6 +2897,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_mcp_cmd_port_init(p_hwfn, p_ptt); qed_get_eee_caps(p_hwfn, p_ptt); + + qed_mcp_read_ufp_config(p_hwfn, p_ptt); } if (qed_mcp_is_init(p_hwfn)) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 2dc9b312a795..cc1b373c0ace 100644 --- 
a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -313,6 +313,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, p_data->d_id.addr_mid = p_conn->d_id.addr_mid; p_data->d_id.addr_lo = p_conn->d_id.addr_lo; p_data->flags = p_conn->flags; + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + SET_FIELD(p_data->flags, + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1); p_data->def_q_idx = p_conn->def_q_idx; return qed_spq_post(p_hwfn, p_ent, NULL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 7f5ec42dde48..b5f70eff0182 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11993,6 +11993,16 @@ struct public_port { #define EEE_REMOTE_TW_TX_OFFSET 0 #define EEE_REMOTE_TW_RX_MASK 0xffff0000 #define EEE_REMOTE_TW_RX_OFFSET 16 + + u32 oem_cfg_port; +#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 +#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 +#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 +#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 +#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C +#define OEM_CFG_SCHED_TYPE_OFFSET 2 +#define OEM_CFG_SCHED_TYPE_ETS 0x1 +#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 }; struct public_func { @@ -12069,6 +12079,23 @@ struct public_func { #define DRV_ID_DRV_INIT_HW_MASK 0x80000000 #define DRV_ID_DRV_INIT_HW_SHIFT 31 #define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) + + u32 oem_cfg_func; +#define OEM_CFG_FUNC_TC_MASK 0x0000000F +#define OEM_CFG_FUNC_TC_OFFSET 0 +#define OEM_CFG_FUNC_TC_0 0x0 +#define OEM_CFG_FUNC_TC_1 0x1 +#define OEM_CFG_FUNC_TC_2 0x2 +#define OEM_CFG_FUNC_TC_3 0x3 +#define OEM_CFG_FUNC_TC_4 0x4 +#define OEM_CFG_FUNC_TC_5 0x5 +#define OEM_CFG_FUNC_TC_6 0x6 +#define OEM_CFG_FUNC_TC_7 0x7 + +#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 }; struct mcp_mac { @@ -12495,6 +12522,7 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_BW_UPDATE10, MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_BW_UPDATE11, + MFW_DRV_MSG_OEM_CFG_UPDATE, MFW_DRV_MSG_MAX }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 6c942c133b2c..c3c1a99a0252 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -919,6 +919,10 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; p_ramrod->inner_vlan_stripping_en = p_ll2_conn->input.rx_vlan_removal_en; + + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) + p_ramrod->report_outer_vlan = 1; p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 
1 : 0; @@ -1493,11 +1497,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + qed_llh_add_protocol_filter(p_hwfn, p_ptt, + ETH_P_FCOE, 0, + QED_LLH_FILTER_ETHERTYPE); qed_llh_add_protocol_filter(p_hwfn, p_ptt, - 0x8906, 0, - QED_LLH_FILTER_ETHERTYPE); - qed_llh_add_protocol_filter(p_hwfn, p_ptt, - 0x8914, 0, + ETH_P_FIP, 0, QED_LLH_FILTER_ETHERTYPE); } @@ -1653,11 +1658,16 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); if (QED_IS_IWARP_PERSONALITY(p_hwfn) && - p_ll2->input.conn_type == QED_LL2_TYPE_OOO) + p_ll2->input.conn_type == QED_LL2_TYPE_OOO) { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); - else + } else { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + p_ll2->input.conn_type == QED_LL2_TYPE_FCOE) + pkt->remove_stag = true; + } + SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, cpu_to_le16(pkt->l4_hdr_offset_w)); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); @@ -1668,6 +1678,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); + SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION, + !!(pkt->remove_stag)); + start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); @@ -1884,11 +1897,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + qed_llh_remove_protocol_filter(p_hwfn, p_ptt, + ETH_P_FCOE, 0, + QED_LLH_FILTER_ETHERTYPE); qed_llh_remove_protocol_filter(p_hwfn, p_ptt, - 0x8906, 0, - QED_LLH_FILTER_ETHERTYPE); - qed_llh_remove_protocol_filter(p_hwfn, p_ptt, - 0x8914, 0, + ETH_P_FIP, 0, QED_LLH_FILTER_ETHERTYPE); } @@ -2360,7 +2374,8 @@ fail: return -EINVAL; } -static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) +static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, + unsigned long xmit_flags) { struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; @@ -2405,6 +2420,9 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) pkt.first_frag = mapping; pkt.first_frag_len = skb->len; pkt.cookie = skb; + if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) && + test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) + pkt.remove_stag = true; rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, &pkt, 1); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 0550f0ee11b3..e80f5e7c7992 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -40,6 +40,7 @@ #include #include #include "qed.h" +#include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" @@ -1486,6 +1487,80 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) &resp, ¶m); } +void qed_mcp_read_ufp_config(struct 
qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 port_cfg, val; + + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + return; + + memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); + port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, oem_cfg_port)); + val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> + OEM_CFG_CHANNEL_TYPE_OFFSET; + if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) + DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val); + + val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; + if (val == OEM_CFG_SCHED_TYPE_ETS) { + p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS; + } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) { + p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; + } else { + p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; + DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val); + } + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + val = (port_cfg & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET; + p_hwfn->ufp_info.tc = (u8)val; + val = (port_cfg & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> + OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; + if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; + } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; + } else { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; + DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val); + } + + DP_NOTICE(p_hwfn, + "UFP shmem config: mode = %d tc = %d pri_type = %d\n", + p_hwfn->ufp_info.mode, + p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type); +} + +static int +qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_mcp_read_ufp_config(p_hwfn, p_ptt); + + if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) { + p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; + p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; + + qed_qm_reconf(p_hwfn, p_ptt); + } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) { + /* Merge UFP TC with the dcbx TC data */ + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_OPERATIONAL_MIB); + } else { + DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n"); + return -EINVAL; + } + + /* update storm FW with negotiation results */ + qed_sp_pf_update_ufp(p_hwfn); + + /* update stag pcp value */ + qed_sp_pf_update_stag(p_hwfn); + + return 0; +} + int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -1529,6 +1604,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, qed_dcbx_mib_update_event(p_hwfn, p_ptt, QED_DCBX_OPERATIONAL_MIB); break; + case MFW_DRV_MSG_OEM_CFG_UPDATE: + qed_mcp_handle_ufp_event(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 3af3896420b9..250579ba632b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1004,6 +1004,14 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); */ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Read ufp config from the shared memory. 
+ * + * @param p_hwfn + * @param p_ptt + */ +void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + /** * @brief Populate the nvm info shadow in the given hardware function * diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 768022235451..e95431f6acd4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -462,6 +462,15 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); * @return int */ +/** + * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod + * + * @param p_hwfn + * + * @return int + */ +int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); + int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 26bed26b9071..8de644b4721e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -314,7 +314,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; - u8 page_cnt; + u8 page_cnt, i; /* update initial eq producer */ qed_eq_prod_update(p_hwfn, @@ -345,12 +345,30 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->mf_mode = MF_NPAR; p_ramrod->outer_tag_config.outer_tag.tci = - cpu_to_le16(p_hwfn->hw_info.ovlan); - if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { + cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q; + } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; p_ramrod->outer_tag_config.enable_stag_pri_change = 1; } + p_ramrod->outer_tag_config.pri_map_valid = 1; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i; + + /* enable_stag_pri_change should be set if port is in BD mode or, + * UFP with Host Control mode. 
+ */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) { + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + else + p_ramrod->outer_tag_config.enable_stag_pri_change = 0; + + p_ramrod->outer_tag_config.outer_tag.tci |= + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); + } /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, @@ -431,6 +449,39 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn) return qed_spq_post(p_hwfn, p_ent, NULL); } +int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EOPNOTSUPP; + + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) { + DP_INFO(p_hwfn, "Invalid priority type %d\n", + p_hwfn->ufp_info.pri_type); + return -EINVAL; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + p_ent->ramrod.pf_update.update_enable_stag_pri_change = true; + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) + p_ent->ramrod.pf_update.enable_stag_pri_change = 1; + else + p_ent->ramrod.pf_update.enable_stag_pri_change = 0; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + /* Set pf update ramrod command params */ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index 773558fc0697..16d1a21cdff9 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -23,6 +23,7 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) struct fip_vlan *vlan; #define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; + unsigned long flags = 0; skb = dev_alloc_skb(sizeof(struct fip_vlan)); if (!skb) @@ -65,7 +66,9 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) kfree_skb(skb); return; } - qed_ops->ll2->start_xmit(qedf->cdev, skb); + + set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags); + qed_ops->ll2->start_xmit(qedf->cdev, skb, flags); } static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, @@ -139,7 +142,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); - qed_ops->ll2->start_xmit(qedf->cdev, skb); + qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); } /* Process incoming FIP frames. 
*/ diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 284ccb566b19..6c19015975a8 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -994,7 +994,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) if (qedf_dump_frames) print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); - qed_ops->ll2->start_xmit(qedf->cdev, skb); + qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); return 0; } diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 7ec7f6e00fb8..ff21aa1fd939 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -1150,7 +1150,7 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) if (vlanid) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); - rc = qedi_ops->ll2->start_xmit(cdev, skb); + rc = qedi_ops->ll2->start_xmit(cdev, skb, 0); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n", rc); diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 266c1fb45387..5eb022953aca 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -202,6 +202,7 @@ struct qed_ll2_tx_pkt_info { bool enable_ip_cksum; bool enable_l4_cksum; bool calc_ip_len; + bool remove_stag; }; #define QED_LL2_UNUSED_HANDLE (0xff) @@ -220,6 +221,11 @@ struct qed_ll2_params { u8 ll2_mac_address[ETH_ALEN]; }; +enum qed_ll2_xmit_flags { + /* FIP discovery packet */ + QED_LL2_XMIT_FLAGS_FIP_DISCOVERY +}; + struct qed_ll2_ops { /** * @brief start - initializes ll2 @@ -245,10 +251,12 @@ struct qed_ll2_ops { * * @param cdev * @param skb + * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags. * * @return 0 on success, otherwise error value. */ - int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb); + int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb, + unsigned long xmit_flags); /** * @brief register_cb_ops - protocol driver register the callback for Rx/Tx -- cgit v1.2.3 From 724e47a149f504cbc9c799804bb46ea81a911909 Mon Sep 17 00:00:00 2001 From: Zhao Chen Date: Mon, 7 May 2018 09:21:57 -0400 Subject: net-next/hinic: add pci device ids for 25ge and 100ge card This patch adds PCI device IDs to support 25GE and 100GE card: 1. Add device id 0x0201 for HINIC 100GE dual port card. 2. Add device id 0x0200 for HINIC 25GE dual port card. 3. Macro of device id 0x1822 is modified for HINIC 25GE quad port card. Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/hinic_main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index eb53bd93065e..5b122728dcb4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -51,7 +51,9 @@ static unsigned int rx_weight = 64; module_param(rx_weight, uint, 0644); MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); -#define PCI_DEVICE_ID_HI1822_PF 0x1822 +#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822 +#define HINIC_DEV_ID_DUAL_PORT_25GE 0x0200 +#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0201 #define HINIC_WQ_NAME "hinic_dev" @@ -1097,7 +1099,9 @@ static void hinic_remove(struct pci_dev *pdev) } static const struct pci_device_id hinic_pci_table[] = { - { PCI_VDEVICE(HUAWEI, PCI_DEVICE_ID_HI1822_PF), 0}, + { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0}, + { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_25GE), 0}, + { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0}, { 0, 0} }; MODULE_DEVICE_TABLE(pci, hinic_pci_table); -- cgit v1.2.3 From cc559c1ac250a6025bd4a9528e424b8da250655b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 8 May 2018 03:18:38 -0400 Subject: bnxt_en: Fix firmware message delay loop regression. A recent change to reduce delay granularity waiting for firmware reponse has caused a regression. With a tighter delay loop, the driver may see the beginning part of the response faster. The original 5 usec delay to wait for the rest of the message is not long enough and some messages are detected as invalid. Increase the maximum wait time from 5 usec to 20 usec. Also, fix the debug message that shows the total delay time for the response when the message times out. With the new logic, the delay time is not fixed per iteration of the loop, so we define a macro to show the total delay time. Fixes: 9751e8e71487 ("bnxt_en: reduce timeout on initial HWRM calls") Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 ++++++++---- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 7 +++++++ 2 files changed, 15 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index efe5c7203f60..168342ac35c5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3530,6 +3530,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, HWRM_RESP_LEN_SFT; valid = bp->hwrm_cmd_resp_addr + len - 1; } else { + int j; + /* Check if response len is updated */ for (i = 0; i < tmo_count; i++) { len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> @@ -3547,14 +3549,15 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (i >= tmo_count) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - timeout, le16_to_cpu(req->req_type), + HWRM_TOTAL_TIMEOUT(i), + le16_to_cpu(req->req_type), le16_to_cpu(req->seq_id), len); return -1; } /* Last byte of resp contains valid bit */ valid = bp->hwrm_cmd_resp_addr + len - 1; - for (i = 0; i < 5; i++) { + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { /* make sure we read from updated DMA memory */ dma_rmb(); if (*valid) @@ -3562,9 +3565,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, udelay(1); } - if (i >= 5) { + if (j >= HWRM_VALID_BIT_DELAY_USEC) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", - timeout, le16_to_cpu(req->req_type), + HWRM_TOTAL_TIMEOUT(i), + le16_to_cpu(req->req_type), le16_to_cpu(req->seq_id), len, *valid); return -1; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 8df1d8b9d2e3..a9c210e70868 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -539,6 +539,13 @@ struct rx_tpa_end_cmp_ext { #define HWRM_MIN_TIMEOUT 25 #define HWRM_MAX_TIMEOUT 40 +#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \ + ((n) * HWRM_SHORT_MIN_TIMEOUT) : \ + (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ + ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) + +#define HWRM_VALID_BIT_DELAY_USEC 20 + #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 #define BNXT_TX_EVENT 4 -- cgit v1.2.3 From dac0490718bd17df5e3995ffca14255e5f9ed22d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 8 May 2018 03:18:39 -0400 Subject: bnxt_en: Check unsupported speeds in bnxt_update_link() on PF only. Only non-NPAR PFs need to actively check and manage unsupported link speeds. NPAR functions and VFs do not control the link speed and should skip the unsupported speed detection logic, to avoid warning messages from firmware rejecting the unsupported firmware calls. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 168342ac35c5..cd3ab788936f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6462,6 +6462,9 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) } mutex_unlock(&bp->hwrm_cmd_lock); + if (!BNXT_SINGLE_PF(bp)) + return 0; + diff = link_info->support_auto_speeds ^ link_info->advertising; if ((link_info->support_auto_speeds | diff) != link_info->support_auto_speeds) { -- cgit v1.2.3 From 7328a23c063a9ecf56314fb9631889c1820bd0ce Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Tue, 8 May 2018 03:18:40 -0400 Subject: bnxt_en: Read phy eeprom A2h address only when optical diagnostics is supported. For SFP+ modules, 0xA2 page is available only when Diagnostic Monitoring Type [Address A0h, Byte 92] is implemented. Extend bnxt_get_module_info(), to read optical diagnostics support at offset 92(0x5c) and set eeprom_len length to ETH_MODULE_SFF_8436_LEN (to exclude A2 page), if dianostics is not supported. Also in bnxt_get_module_info(), module id is read from offset 0x5e which is not correct. It was working by accident, as offset was not effective without setting enables flag in the firmware request. SFP module id is present at location 0. Fix this by removing the offset and read it from location 0. Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 +-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 20 ++++++++------------ 2 files changed, 9 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a9c210e70868..9b14eb610b9f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1414,8 +1414,7 @@ struct bnxt { #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 -#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e -#define SFP_EEPROM_SFF_8472_COMP_SIZE 1 +#define SFF_DIAG_SUPPORT_OFFSET 0x5c #define SFF_MODULE_ID_SFP 0x3 #define SFF_MODULE_ID_QSFP 0xc #define SFF_MODULE_ID_QSFP_PLUS 0xd diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index ad98b78f5aa1..7270c8b0cef3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2184,9 +2184,8 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, static int bnxt_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { + u8 data[SFF_DIAG_SUPPORT_OFFSET + 1]; struct bnxt *bp = netdev_priv(dev); - struct hwrm_port_phy_i2c_read_input req = {0}; - struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; int rc; /* No point in going further if phy status indicates @@ -2201,21 +2200,19 @@ static int bnxt_get_module_info(struct net_device *dev, if (bp->hwrm_spec_code < 0x10202) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); - req.i2c_slave_addr = I2C_DEV_ADDR_A0; - req.page_number = 0; - req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR); - req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE; - req.port_id = cpu_to_le16(bp->pf.port_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = 
_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, + SFF_DIAG_SUPPORT_OFFSET + 1, + data); if (!rc) { - u32 module_id = le32_to_cpu(output->data[0]); + u8 module_id = data[0]; + u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET]; switch (module_id) { case SFF_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + if (!diag_supported) + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; break; case SFF_MODULE_ID_QSFP: case SFF_MODULE_ID_QSFP_PLUS: @@ -2231,7 +2228,6 @@ static int bnxt_get_module_info(struct net_device *dev, break; } } - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } -- cgit v1.2.3 From 707e7e96602675beb5e09bb994195663da6eb56d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 8 May 2018 03:18:41 -0400 Subject: bnxt_en: Always forward VF MAC address to the PF. The current code already forwards the VF MAC address to the PF, except in one case. If the VF driver gets a valid MAC address from the firmware during probe time, it will not forward the MAC address to the PF, incorrectly assuming that the PF already knows the MAC address. This causes "ip link show" to show zero VF MAC addresses for this case. This assumption is not correct. Newer firmware remembers the VF MAC address last used by the VF and provides it to the VF driver during probe. So we need to always forward the VF MAC address to the PF. The forwarded MAC address may now be the PF assigned MAC address and so we need to make sure we approve it for this case. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index cd3ab788936f..dfa0839f6656 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -8678,8 +8678,8 @@ static int bnxt_init_mac_addr(struct bnxt *bp) memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); } else { eth_hw_addr_random(bp->dev); - rc = bnxt_approve_mac(bp, bp->dev->dev_addr); } + rc = bnxt_approve_mac(bp, bp->dev->dev_addr); #endif } return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index cc21d874eb6e..a64910892c25 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -923,7 +923,8 @@ static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { if (is_valid_ether_addr(req->dflt_mac_addr) && ((vf->flags & BNXT_VF_TRUST) || - (!is_valid_ether_addr(vf->mac_addr)))) { + !is_valid_ether_addr(vf->mac_addr) || + ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) { ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); } -- cgit v1.2.3 From 2b999ba899059eed00a03d029894a62486e7e2bc Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Fri, 4 May 2018 17:21:03 +0200 Subject: net: phy: sfp: handle cases where neither BR, min nor BR, max is given When computing the bitrate using values read from an SFP module EEPROM, we use the nominal BR plus BR,min and BR,max to determine the boundaries. 
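As a rough worked example of that computation (illustrative numbers only; this is a sketch, not code from the patch, and the real logic is in the sfp-bus.c hunk below):

/* Illustrative only: how the min/max bitrates are derived from the
 * nominal rate and a tolerance field, using made-up numbers.
 */
static void example_sfp_bitrate_bounds(void)
{
	unsigned int br_nom = 10000;	/* nominal bitrate */
	unsigned int tol = 5;		/* +/- 5 % tolerance from the EEPROM */
	unsigned int br_min, br_max;

	br_max = br_nom + br_nom * tol / 100;	/* 10500 */
	br_min = br_nom - br_nom * tol / 100;	/*  9500 */

	/* If the module reports no tolerance, both values collapse to
	 * br_nom; the new hunk then forces br_min to 0 for passive
	 * cables so the nominal rate only acts as an upper bound.
	 */
	if (br_min == br_max)
		br_min = 0;
}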
But in some cases BR,min and BR,max aren't provided, which led the SFP code to end up having the nominal value for both the minimum and maximum bitrate values. When using a passive cable, the nominal value should be used as the maximum one, and there is no minimum one so we should use 0. Signed-off-by: Antoine Tenart Acked-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/sfp-bus.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 0381da78d228..b2f4519dfb84 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -132,6 +132,13 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, br_max = br_nom + br_nom * id->ext.br_min / 100; br_min = br_nom - br_nom * id->ext.br_min / 100; } + + /* When using passive cables, in case neither BR,min nor BR,max + * are specified, set br_min to 0 as the nominal value is then + * used as the maximum. + */ + if (br_min == br_max && id->base.sfp_ct_passive) + br_min = 0; } /* Set ethtool support from the compliance fields. */ -- cgit v1.2.3 From bf516e7d8b1c1ff7706d56961306fa7e19f352b6 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 4 May 2018 10:42:04 +0800 Subject: rtlwifi: remove duplicate definition of antenna number for btcoex Two enumerations bt_total_ant_num and bt_ant_num are identical, so one can be removed. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 5 ----- 2 files changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index fd7928fdbd1a..3f2295fdb02d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2574,11 +2574,11 @@ void rtl92ee_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.btcoexist = 0; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8192E; - rtlpriv->btcoexist.btc_info.ant_num = ANT_TOTAL_X2; + rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; } else { rtlpriv->btcoexist.btc_info.btcoexist = 1; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8192E; - rtlpriv->btcoexist.btc_info.ant_num = ANT_TOTAL_X1; + rtlpriv->btcoexist.btc_info.ant_num = ANT_X1; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index ce1754054a07..208010fcde21 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2842,11 +2842,6 @@ enum bt_co_type { BT_RTL8812A = 11, }; -enum bt_total_ant_num { - ANT_TOTAL_X2 = 0, - ANT_TOTAL_X1 = 1 -}; - enum bt_cur_state { BT_OFF = 0, BT_ON = 1, -- cgit v1.2.3 From 9c4a121e82634aa000a702c98cd6f05b27d6e186 Mon Sep 17 00:00:00 2001 From: Sean Lanigan Date: Fri, 4 May 2018 16:48:23 +1000 Subject: brcmfmac: Add support for bcm43364 wireless chipset Add support for the BCM43364 chipset via an SDIO interface, as used in e.g. the Murata 1FX module. The BCM43364 uses the same firmware as the BCM43430 (which is already included), the only difference is the omission of Bluetooth. However, the SDIO_ID for the BCM43364 is 02D0:A9A4, giving it a MODALIAS of sdio:c00v02D0dA9A4, which doesn't get recognised and hence doesn't load the brcmfmac module. 
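For illustration, that MODALIAS string decomposes into class 0x00, vendor 0x02d0 (Broadcom) and device 0xa9a4. A minimal sketch (not the patch itself) of the kind of sdio_device_id table entry the SDIO core matches such a string against:

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

/* Sketch: an ID table entry for vendor 0x02d0 / device 0xa9a4.  In the
 * real driver the entry is added to brcmf_sdmmc_ids[] through the
 * BRCMF_SDIO_DEVICE() wrapper, as shown in the hunk below.
 */
static const struct sdio_device_id example_bcm43364_ids[] = {
	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, 0xa9a4) },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(sdio, example_bcm43364_ids);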
Adding the 'A9A4' ID in the appropriate place triggers the brcmfmac driver to load, and then correctly use the firmware file 'brcmfmac43430-sdio.bin'. Signed-off-by: Sean Lanigan Acked-by: Ulf Hansson Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 1 + include/linux/mmc/sdio_ids.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 0b68240ec7b4..a1915411c280 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -963,6 +963,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index cdd66a5fbd5e..0a7abe8a407f 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -35,6 +35,7 @@ #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 #define SDIO_DEVICE_ID_BROADCOM_4339 0x4339 #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 +#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf -- cgit v1.2.3 From 4793e5a954faf2fa5e88726332bf851fbebcbf1c Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 6 May 2018 09:53:53 +0200 Subject: mwifiex: delete unneeded include Nothing that is defined in 11ac.h is referenced in cmdevt.c. Signed-off-by: Julia Lawall Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 7014f440e6f8..9cfcdf6bec52 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -25,7 +25,6 @@ #include "main.h" #include "wmm.h" #include "11n.h" -#include "11ac.h" static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); -- cgit v1.2.3 From 4f9fb990013c03c5d56266fb4c439816cd84e1a6 Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Tue, 8 May 2018 00:10:58 +0530 Subject: mwifiex: increase TX threshold to avoid TX timeout during ED MAC test While carrying out energy detection (ED) tests, the chip will stop transmission upon detecting energy in the connected channel. In response, the driver will stop dequeuing TX packets, so wmm_tx_pending keeps incrementing. Once wmm_tx_pending reaches 100, the driver calls netif_tx_stop_queue(). If the TX queue is not restarted within 5 (watchdog_timeo) seconds, it will result in a TX timeout. The ED test is carried out for one minute and the current threshold of 100 is easily overcome by the traffic, causing TX timeouts. To fix this, increase the threshold to 400.
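The underlying flow control is the usual high/low watermark pattern. A minimal sketch (illustrative only, not the mwifiex code; the EXAMPLE_* names and the atomic counter stand in for the driver's own bookkeeping):

#include <linux/netdevice.h>
#include <linux/atomic.h>

#define EXAMPLE_MAX_TX_PENDING	400	/* stop queues above this */
#define EXAMPLE_LOW_TX_PENDING	380	/* wake queues below this */

static void example_tx_flow_control(struct net_device *dev,
				    atomic_t *tx_pending)
{
	/* Stop the queues at the high watermark so the stack stops
	 * feeding frames; wake them only once the backlog has drained
	 * below the low watermark, which keeps the 5 second
	 * dev_watchdog timer from firing during normal bursts.
	 */
	if (atomic_read(tx_pending) >= EXAMPLE_MAX_TX_PENDING)
		netif_tx_stop_all_queues(dev);
	else if (atomic_read(tx_pending) < EXAMPLE_LOW_TX_PENDING &&
		 netif_queue_stopped(dev))
		netif_tx_wake_all_queues(dev);
}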
Signed-off-by: Cathy Luo Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 7c95c1279548..8ae74ed78805 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -84,8 +84,8 @@ enum { #define MWIFIEX_TIMER_10S 10000 #define MWIFIEX_TIMER_1S 1000 -#define MAX_TX_PENDING 100 -#define LOW_TX_PENDING 80 +#define MAX_TX_PENDING 400 +#define LOW_TX_PENDING 380 #define HIGH_RX_PENDING 50 #define LOW_RX_PENDING 20 -- cgit v1.2.3 From 3c6a67dd6697e25bfec0b85a008ec4a3c482d993 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 12 Apr 2018 11:15:54 -0700 Subject: fm10k: setup VLANs for l2 accelerated macvlan interfaces We have support for accelerating macvlan devices via the .ndo_dfwd_add_station() netdev op. These accelerated macvlan MAC addresses are stored in the l2_accel structure, separate from the unicast or multicast address lists. If a VLAN is added on top of the macvlan device by the stack, traffic will not properly flow to the macvlan. This occurs because we fail to setup the VLANs for l2_accel MAC addresses. In the non-offloaded case the MAC address is added to the unicast address list, and thus the normal setup for enabling VLANs works as expected. We also need to add VLANs marked from .ndo_vlan_rx_add_vid() into the l2_accel MAC addresses. Otherwise, VLAN traffic will not properly be received by the VLAN devices attached to the offloaded macvlan devices. Fix this by adding necessary logic to setup VLANs not only for the unicast and multicast addresses, but also the l2_accel list. We need similar logic in dfwd_add_station, dfwd_del_station, fm10k_update_vid, and fm10k_restore_rx_state. Signed-off-by: Jacob Keller Reviewed-by: Alexander Duyck Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 50 ++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index c879af72bbf5..0dc9f2dbc1ad 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -907,7 +907,9 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev, static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) { struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_l2_accel *l2_accel = interface->l2_accel; struct fm10k_hw *hw = &interface->hw; + u16 glort; s32 err; int i; @@ -975,6 +977,22 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (err) goto err_out; + /* Update L2 accelerated macvlan addresses */ + if (l2_accel) { + for (i = 0; i < l2_accel->size; i++) { + struct net_device *sdev = l2_accel->macvlan[i]; + + if (!sdev) + continue; + + glort = l2_accel->dglort + 1 + i; + + fm10k_queue_mac_request(interface, glort, + sdev->dev_addr, + vid, set); + } + } + /* set VLAN ID prior to syncing/unsyncing the VLAN */ interface->vid = vid + (set ? 
VLAN_N_VID : 0); @@ -1214,6 +1232,22 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) fm10k_queue_mac_request(interface, glort, hw->mac.addr, vid, true); + + /* synchronize macvlan addresses */ + if (l2_accel) { + for (i = 0; i < l2_accel->size; i++) { + struct net_device *sdev = l2_accel->macvlan[i]; + + if (!sdev) + continue; + + glort = l2_accel->dglort + 1 + i; + + fm10k_queue_mac_request(interface, glort, + sdev->dev_addr, + vid, true); + } + } } /* update xcast mode before synchronizing addresses if host's mailbox @@ -1430,7 +1464,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, struct fm10k_dglort_cfg dglort = { 0 }; struct fm10k_hw *hw = &interface->hw; int size = 0, i; - u16 glort; + u16 vid, glort; /* The hardware supported by fm10k only filters on the destination MAC * address. In order to avoid issues we only support offloading modes @@ -1510,6 +1544,12 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, hw->mac.default_vid, true); } + for (vid = fm10k_find_next_vlan(interface, 0); + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) + fm10k_queue_mac_request(interface, glort, sdev->dev_addr, + vid, true); + fm10k_mbx_unlock(interface); return sdev; @@ -1522,8 +1562,8 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) struct fm10k_dglort_cfg dglort = { 0 }; struct fm10k_hw *hw = &interface->hw; struct net_device *sdev = priv; + u16 vid, glort; int i; - u16 glort; if (!l2_accel) return; @@ -1550,6 +1590,12 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) hw->mac.default_vid, false); } + for (vid = fm10k_find_next_vlan(interface, 0); + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) + fm10k_queue_mac_request(interface, glort, sdev->dev_addr, + vid, false); + fm10k_mbx_unlock(interface); /* record removal */ -- cgit v1.2.3 From 2ead8ae110c6b62fe4d1d1bf04855e86582b96f5 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 12 Apr 2018 11:15:55 -0700 Subject: fm10k: reduce duplicate fm10k_stat macro code Share some of the code for setting up fm10k_stat macros by implementing an FM10K_STAT_FIELDS macro which we can use when setting up the type specific macros. 
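To see what the shared macro buys, FM10K_NETDEV_STAT(tx_packets) now expands to roughly the following (a sketch of the expansion, not additional code in the patch):

/* approximate expansion of FM10K_NETDEV_STAT(tx_packets) */
{
	.stat_string = "tx_packets",
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, tx_packets),
	.stat_offset = offsetof(struct net_device_stats, tx_packets)
},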
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 29 ++++++++++++------------ 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index eeac2b75a195..471dfd92d41b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -11,12 +11,17 @@ struct fm10k_stats { int stat_offset; }; -#define FM10K_NETDEV_STAT(_net_stat) { \ - .stat_string = #_net_stat, \ - .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ - .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +#define FM10K_STAT_FIELDS(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ } +/* netdevice statistics */ +#define FM10K_NETDEV_STAT(_net_stat) \ + FM10K_STAT_FIELDS(struct net_device_stats, __stringify(_net_stat), \ + _net_stat) + static const struct fm10k_stats fm10k_gstrings_net_stats[] = { FM10K_NETDEV_STAT(tx_packets), FM10K_NETDEV_STAT(tx_bytes), @@ -34,11 +39,9 @@ static const struct fm10k_stats fm10k_gstrings_net_stats[] = { #define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats) -#define FM10K_STAT(_name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct fm10k_intfc, _stat), \ - .stat_offset = offsetof(struct fm10k_intfc, _stat) \ -} +/* General interface statistics */ +#define FM10K_STAT(_name, _stat) \ + FM10K_STAT_FIELDS(struct fm10k_intfc, _name, _stat) static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("tx_restart_queue", restart_queue), @@ -75,11 +78,9 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), }; -#define FM10K_MBX_STAT(_name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \ - .stat_offset = offsetof(struct fm10k_mbx_info, _stat) \ -} +/* mailbox statistics */ +#define FM10K_MBX_STAT(_name, _stat) \ + FM10K_STAT_FIELDS(struct fm10k_mbx_info, _name, _stat) static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { FM10K_MBX_STAT("mbx_tx_busy", tx_busy), -- cgit v1.2.3 From d63bb21a7e722fcaa6cc6a217f21fe25a9e2c89e Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 12 Apr 2018 11:15:56 -0700 Subject: fm10k: use variadic arguments to fm10k_add_stat_strings Instead of using a fixed prefix string we setup before each call to fm10k_add_stat_strings, modify the helper to take variadic arguments and pass them to vsnprintf. This requires changing the fm10k_stat strings to take % format specifiers where necessary, but the resulting code is much simpler. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 53 ++++++++++++------------ 1 file changed, 27 insertions(+), 26 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 471dfd92d41b..17d2388e71a2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -6,6 +6,11 @@ #include "fm10k.h" struct fm10k_stats { + /* The stat_string is expected to be a format string formatted using + * vsnprintf by fm10k_add_stat_strings. 
Every member of a stats array + * should use the same format specifiers as they will be formatted + * using the same variadic arguments. + */ char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; @@ -94,15 +99,13 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed), }; -#define FM10K_QUEUE_STAT(_name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \ - .stat_offset = offsetof(struct fm10k_ring, _stat) \ -} +/* per-queue ring statistics */ +#define FM10K_QUEUE_STAT(_name, _stat) \ + FM10K_STAT_FIELDS(struct fm10k_ring, _name, _stat) static const struct fm10k_stats fm10k_gstrings_queue_stats[] = { - FM10K_QUEUE_STAT("packets", stats.packets), - FM10K_QUEUE_STAT("bytes", stats.bytes), + FM10K_QUEUE_STAT("%s_queue_%u_packets", stats.packets), + FM10K_QUEUE_STAT("%s_queue_%u_bytes", stats.bytes), }; #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) @@ -132,16 +135,18 @@ enum { static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { }; -static void fm10k_add_stat_strings(u8 **p, const char *prefix, - const struct fm10k_stats stats[], - const unsigned int size) +static void fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[], + const unsigned int size, ...) { unsigned int i; for (i = 0; i < size; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%s%s", - prefix, stats[i].stat_string); + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); *p += ETH_GSTRING_LEN; + va_end(args); } } @@ -150,31 +155,27 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) struct fm10k_intfc *interface = netdev_priv(dev); unsigned int i; - fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats, + fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats, FM10K_NETDEV_STATS_LEN); - fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats, + fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats, FM10K_GLOBAL_STATS_LEN); - fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats, + fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats, FM10K_MBX_STATS_LEN); if (interface->hw.mac.type != fm10k_mac_vf) - fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats, + fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats, FM10K_PF_STATS_LEN); for (i = 0; i < interface->hw.mac.max_queues; i++) { - char prefix[ETH_GSTRING_LEN]; - - snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i); - fm10k_add_stat_strings(&data, prefix, - fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN, + "tx", i); - snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i); - fm10k_add_stat_strings(&data, prefix, - fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN, + "rx", i); } } -- cgit v1.2.3 From 36592d6ce8d38590894fb34329b0786386ee75bc Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 12 Apr 2018 11:15:57 -0700 Subject: fm10k: use macro to avoid passing the array and size separately Avoid potential bugs with fm10k_add_stat_strings and fm10k_add_ethtool_stats by using a macro to calculate the ARRAY_SIZE when passing. This helps ensure that the size is always correct. Note that it assumes we only pass static const fm10k_stat arrays, and that evaluation of the argument won't have side effects. 
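A minimal sketch of the resulting pattern (the hunk below contains the real wrapper; this sketch only spells out why the commit restricts callers to true arrays):

/* Sketch: the element count is taken from the array's type at the call
 * site.  The kernel's ARRAY_SIZE() embeds __must_be_array(), so passing
 * a plain pointer fails to build instead of yielding a bogus count.
 */
#define example_add_stat_strings(p, stats, ...) \
	__fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)

static void example_usage(u8 **data)
{
	/* expands with ARRAY_SIZE(fm10k_gstrings_mbx_stats) as the count */
	example_add_stat_strings(data, fm10k_gstrings_mbx_stats);
}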
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 48 +++++++++++------------- 1 file changed, 21 insertions(+), 27 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 17d2388e71a2..09fa1a30ee3e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -135,8 +135,8 @@ enum { static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { }; -static void fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[], - const unsigned int size, ...) +static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[], + const unsigned int size, ...) { unsigned int i; @@ -150,31 +150,28 @@ static void fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[], } } +#define fm10k_add_stat_strings(p, stats, ...) \ + __fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) + static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); unsigned int i; - fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats, - FM10K_NETDEV_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats); - fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats, - FM10K_GLOBAL_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats); - fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats, - FM10K_MBX_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats); if (interface->hw.mac.type != fm10k_mac_vf) - fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats, - FM10K_PF_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats); for (i = 0; i < interface->hw.mac.max_queues; i++) { fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN, "tx", i); fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN, "rx", i); } } @@ -220,9 +217,9 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) } } -static void fm10k_add_ethtool_stats(u64 **data, void *pointer, - const struct fm10k_stats stats[], - const unsigned int size) +static void __fm10k_add_ethtool_stats(u64 **data, void *pointer, + const struct fm10k_stats stats[], + const unsigned int size) { unsigned int i; char *p; @@ -256,6 +253,9 @@ static void fm10k_add_ethtool_stats(u64 **data, void *pointer, } } +#define fm10k_add_ethtool_stats(data, pointer, stats) \ + __fm10k_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + static void fm10k_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats __always_unused *stats, u64 *data) @@ -266,20 +266,16 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, fm10k_update_stats(interface); - fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats, - FM10K_NETDEV_STATS_LEN); + fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats); - fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats, - FM10K_GLOBAL_STATS_LEN); + fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats); fm10k_add_ethtool_stats(&data, &interface->hw.mbx, - fm10k_gstrings_mbx_stats, - FM10K_MBX_STATS_LEN); + fm10k_gstrings_mbx_stats); if (interface->hw.mac.type != fm10k_mac_vf) { fm10k_add_ethtool_stats(&data, interface, - fm10k_gstrings_pf_stats, - FM10K_PF_STATS_LEN); + fm10k_gstrings_pf_stats); } for (i = 0; i < 
interface->hw.mac.max_queues; i++) { @@ -287,13 +283,11 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, ring = interface->tx_ring[i]; fm10k_add_ethtool_stats(&data, ring, - fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN); + fm10k_gstrings_queue_stats); ring = interface->rx_ring[i]; fm10k_add_ethtool_stats(&data, ring, - fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN); + fm10k_gstrings_queue_stats); } } -- cgit v1.2.3 From 454ca380ce3cac79bfd295f5a7ae15ec26ff1b67 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 12 Apr 2018 11:15:58 -0700 Subject: fm10k: warn if the stat size is unknown Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 09fa1a30ee3e..7657daa27298 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -248,6 +248,8 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer, *((*data)++) = *(u8 *)p; break; default: + WARN_ONCE(1, "unexpected stat size for %s", + stats[i].stat_string); *((*data)++) = 0; } } -- cgit v1.2.3 From 0a3e92de1be42b5dbed4b81f835740d3f58c5430 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 12 Apr 2018 11:15:59 -0700 Subject: fm10k: don't protect fm10k_queue_mac_request by fm10k_host_mbx_ready We don't actually need to check if the host mbx is ready when queuing MAC requests, because these are not handled by a special queue which queues up requests until the mailbox is capable of handling them. Pull these requests outside the fm10k_host_mbx_ready() check, as it is not necessary. 
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 0dc9f2dbc1ad..929f538d28bc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1537,12 +1537,12 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, glort = l2_accel->dglort + 1 + i; - if (fm10k_host_mbx_ready(interface)) { + if (fm10k_host_mbx_ready(interface)) hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); - fm10k_queue_mac_request(interface, glort, sdev->dev_addr, - hw->mac.default_vid, true); - } + + fm10k_queue_mac_request(interface, glort, sdev->dev_addr, + hw->mac.default_vid, true); for (vid = fm10k_find_next_vlan(interface, 0); vid < VLAN_N_VID; @@ -1583,12 +1583,12 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) glort = l2_accel->dglort + 1 + i; - if (fm10k_host_mbx_ready(interface)) { + if (fm10k_host_mbx_ready(interface)) hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); - fm10k_queue_mac_request(interface, glort, sdev->dev_addr, - hw->mac.default_vid, false); - } + + fm10k_queue_mac_request(interface, glort, sdev->dev_addr, + hw->mac.default_vid, false); for (vid = fm10k_find_next_vlan(interface, 0); vid < VLAN_N_VID; -- cgit v1.2.3 From d985888faae6588c8ce9e45ad1e4a3ab5f0376b4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 8 May 2018 19:37:07 -0700 Subject: nfp: bpf: support setting the RX queue index BPF has access to all internal FW datapath structures. Including the structure containing RX queue selection. With little coordination with the datapath we can let the offloaded BPF select the RX queue. We just need a way to tell the datapath that queue selection has already been done and it shouldn't overwrite it. Define a bit to tell datapath BPF already selected a queue (QSEL_SET), if the selected queue is not enabled (>= number of enabled queues) datapath will perform normal RSS. BPF queue selection on the NIC can be used to replace standard datapath RSS with fully programmable BPF/XDP RSS. 
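For context, a minimal sketch of the kind of offloaded XDP program that exercises this capability (not part of the patch; the queue number and names are arbitrary, and SEC() is open-coded so the snippet does not rely on a particular helper header):

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

/* Steer every frame to RX queue 3.  Per the commit message, a queue id
 * that is out of range simply falls back to normal RSS in the datapath.
 * The write to rx_queue_index is a 4-byte store, as the verifier hook
 * in this patch requires.
 */
SEC("xdp")
int xdp_select_queue(struct xdp_md *ctx)
{
	ctx->rx_queue_index = 3;
	return XDP_PASS;
}

SEC("license")
char _license[] = "GPL";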
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/fw.h | 1 + drivers/net/ethernet/netronome/nfp/bpf/jit.c | 47 +++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/bpf/main.c | 11 ++++++ drivers/net/ethernet/netronome/nfp/bpf/main.h | 8 ++++ drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 28 +++++++++++++- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 22 ++++++----- 6 files changed, 105 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index 3dbc21653ce5..4c7972e3db63 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -50,6 +50,7 @@ enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2, NFP_BPF_CAP_TYPE_MAPS = 3, NFP_BPF_CAP_TYPE_RANDOM = 4, + NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5, }; struct nfp_bpf_cap_tlv_func { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 326a2085d650..a4d3da215863 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -42,6 +42,7 @@ #include "main.h" #include "../nfp_asm.h" +#include "../nfp_net_ctrl.h" /* --- NFP prog --- */ /* Foreach "multiple" entries macros provide pos and next pointers. @@ -1470,6 +1471,38 @@ nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int +nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + u32 jmp_tgt; + + jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5; + + /* Make sure the queue id fits into FW field */ + emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2), + ALU_OP_AND_NOT_B, reg_imm(0xff)); + emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2); + + /* Set the 'queue selected' bit and the queue value */ + emit_shf(nfp_prog, pv_qsel_set(nfp_prog), + pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1), + SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT); + emit_ld_field(nfp_prog, + pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2), + SHF_SC_NONE, 0); + /* Delay slots end here, we will jump over next instruction if queue + * value fits into the field. 
+ */ + emit_ld_field(nfp_prog, + pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX), + SHF_SC_NONE, 0); + + if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt)) + return -EINVAL; + + return 0; +} + /* --- Callbacks --- */ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { @@ -2160,6 +2193,17 @@ mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, false, wrp_lmem_store); } +static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + switch (meta->insn.off) { + case offsetof(struct xdp_md, rx_queue_index): + return nfp_queue_select(nfp_prog, meta); + } + + WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ + return -EOPNOTSUPP; +} + static int mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size) @@ -2186,6 +2230,9 @@ static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { + if (meta->ptr.type == PTR_TO_CTX) + if (nfp_prog->type == BPF_PROG_TYPE_XDP) + return mem_stx_xdp(nfp_prog, meta); return mem_stx(nfp_prog, meta, 4); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index d72f9e7f42da..f1846d8f59cc 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -334,6 +334,13 @@ nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value, return 0; } +static int +nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) +{ + bpf->queue_select = true; + return 0; +} + static int nfp_bpf_parse_capabilities(struct nfp_app *app) { struct nfp_cpp *cpp = app->pf->cpp; @@ -376,6 +383,10 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) if (nfp_bpf_parse_cap_random(app->priv, value, length)) goto err_release_free; break; + case NFP_BPF_CAP_TYPE_QUEUE_SELECT: + if (nfp_bpf_parse_cap_qsel(app->priv, value, length)) + goto err_release_free; + break; default: nfp_dbg(cpp, "unknown BPF capability: %d\n", type); break; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 82682378d57f..8b143546ae85 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -82,10 +82,16 @@ enum static_regs { enum pkt_vec { PKT_VEC_PKT_LEN = 0, PKT_VEC_PKT_PTR = 2, + PKT_VEC_QSEL_SET = 4, + PKT_VEC_QSEL_VAL = 6, }; +#define PKT_VEL_QSEL_SET_BIT 4 + #define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN) #define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR) +#define pv_qsel_set(np) reg_lm(1, PKT_VEC_QSEL_SET) +#define pv_qsel_val(np) reg_lm(1, PKT_VEC_QSEL_VAL) #define stack_reg(np) reg_a(STATIC_REG_STACK) #define stack_imm(np) imm_b(np) @@ -139,6 +145,7 @@ enum pkt_vec { * @helpers.perf_event_output: output perf event to a ring buffer * * @pseudo_random: FW initialized the pseudo-random machinery (CSRs) + * @queue_select: BPF can set the RX queue ID in packet vector */ struct nfp_app_bpf { struct nfp_app *app; @@ -181,6 +188,7 @@ struct nfp_app_bpf { } helpers; bool pseudo_random; + bool queue_select; }; enum nfp_bpf_map_use { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index e163f3cfa47d..844a9be6e55a 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -467,6 +467,30 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct 
nfp_insn_meta *meta, return 0; } +static int +nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + struct bpf_verifier_env *env) +{ + const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg; + + if (reg->type == PTR_TO_CTX) { + if (nfp_prog->type == BPF_PROG_TYPE_XDP) { + /* XDP ctx accesses must be 4B in size */ + switch (meta->insn.off) { + case offsetof(struct xdp_md, rx_queue_index): + if (nfp_prog->bpf->queue_select) + goto exit_check_ptr; + pr_vlog(env, "queue selection not supported by FW\n"); + return -EOPNOTSUPP; + } + } + pr_vlog(env, "unsupported store to context field\n"); + return -EOPNOTSUPP; + } +exit_check_ptr: + return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg); +} + static int nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, struct bpf_verifier_env *env) @@ -522,8 +546,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.src_reg); if (is_mbpf_store(meta)) - return nfp_bpf_check_ptr(nfp_prog, meta, env, - meta->insn.dst_reg); + return nfp_bpf_check_store(nfp_prog, meta, env); + if (is_mbpf_xadd(meta)) return nfp_bpf_check_xadd(nfp_prog, meta, env); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index 5f2b2f24f4fa..faa4e131c136 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -183,16 +183,18 @@ enum shf_sc { #define OP_ALU_DST_LMEXTN 0x80000000000ULL enum alu_op { - ALU_OP_NONE = 0x00, - ALU_OP_ADD = 0x01, - ALU_OP_NOT = 0x04, - ALU_OP_ADD_2B = 0x05, - ALU_OP_AND = 0x08, - ALU_OP_SUB_C = 0x0d, - ALU_OP_ADD_C = 0x11, - ALU_OP_OR = 0x14, - ALU_OP_SUB = 0x15, - ALU_OP_XOR = 0x18, + ALU_OP_NONE = 0x00, + ALU_OP_ADD = 0x01, + ALU_OP_NOT = 0x04, + ALU_OP_ADD_2B = 0x05, + ALU_OP_AND = 0x08, + ALU_OP_AND_NOT_A = 0x0c, + ALU_OP_SUB_C = 0x0d, + ALU_OP_AND_NOT_B = 0x10, + ALU_OP_ADD_C = 0x11, + ALU_OP_OR = 0x14, + ALU_OP_SUB = 0x15, + ALU_OP_XOR = 0x18, }; enum alu_dst_ab { -- cgit v1.2.3 From 3e50d2da5850dd126b3e6a6e4387620d55b71db4 Mon Sep 17 00:00:00 2001 From: Nisar Sayed Date: Wed, 2 May 2018 21:09:17 +0530 Subject: microchip_t1: Add driver for Microchip LAN87XX T1 PHYs Add driver for Microchip LAN87XX T1 PHYs This patch support driver for Microchp T1 PHYs. There will be followup patches to this driver to support T1 PHY features such as cable diagnostics, signal quality indicator(SQI), sleep and wakeup (TC10) support. Signed-off-by: Nisar Sayed Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 5 +++ drivers/net/phy/Makefile | 1 + drivers/net/phy/microchip_t1.c | 74 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 drivers/net/phy/microchip_t1.c (limited to 'drivers/net') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index edb8b9ab827f..ae2d69170dc3 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -360,6 +360,11 @@ config MICROCHIP_PHY help Supports the LAN88XX PHYs. +config MICROCHIP_T1_PHY + tristate "Microchip T1 PHYs" + ---help--- + Supports the LAN87XX PHYs. 
+ config MICROSEMI_PHY tristate "Microsemi PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 701ca0b8717e..22183b9d7e08 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MICROCHIP_PHY) += microchip.o +obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o obj-$(CONFIG_MICROSEMI_PHY) += mscc.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c new file mode 100644 index 000000000000..b1917dd1978a --- /dev/null +++ b/drivers/net/phy/microchip_t1.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Microchip Technology + +#include +#include +#include +#include + +/* Interrupt Source Register */ +#define LAN87XX_INTERRUPT_SOURCE (0x18) + +/* Interrupt Mask Register */ +#define LAN87XX_INTERRUPT_MASK (0x19) +#define LAN87XX_MASK_LINK_UP (0x0004) +#define LAN87XX_MASK_LINK_DOWN (0x0002) + +#define DRIVER_AUTHOR "Nisar Sayed " +#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver" + +static int lan87xx_phy_config_intr(struct phy_device *phydev) +{ + int rc, val = 0; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* unmask all source and clear them before enable */ + rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF); + rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); + val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN; + } + + rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val); + + return rc < 0 ? rc : 0; +} + +static int lan87xx_phy_ack_interrupt(struct phy_device *phydev) +{ + int rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); + + return rc < 0 ? rc : 0; +} + +static struct phy_driver microchip_t1_phy_driver[] = { + { + .phy_id = 0x0007c150, + .phy_id_mask = 0xfffffff0, + .name = "Microchip LAN87xx T1", + + .features = SUPPORTED_100baseT_Full, + .flags = PHY_HAS_INTERRUPT, + + .config_init = genphy_config_init, + .config_aneg = genphy_config_aneg, + + .ack_interrupt = lan87xx_phy_ack_interrupt, + .config_intr = lan87xx_phy_config_intr, + + .suspend = genphy_suspend, + .resume = genphy_resume, + } +}; + +module_phy_driver(microchip_t1_phy_driver); + +static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = { + { 0x0007c150, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, microchip_t1_tbl); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From b2641e2ad456459a655da2433e3150d41640a6de Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Thu, 3 May 2018 17:28:11 +0100 Subject: net: hns3: Add support of hardware rx-vlan-offload to HNS3 VF driver This patch adds support of hardware rx-vlan-offload to VF driver. VF uses mailbox to convey PF to configure the hardware. Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 8 +------- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1 + drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 5 +++++ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 12 ++++++++++++ 5 files changed, 20 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index c4e295094da7..729bcab947df 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1626,8 +1626,6 @@ static struct pci_driver hns3_driver = { /* set default feature to hns3 */ static void hns3_set_default_feature(struct net_device *netdev) { - struct hnae3_handle *h = hns3_get_handle(netdev); - netdev->priv_flags |= IFF_UNICAST_FLT; netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -1656,15 +1654,11 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; - - if (!(h->flags & HNAE3_SUPPORT_VF)) - netdev->hw_features |= - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index dd5d65c9cca6..084b90437e23 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4862,7 +4862,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } -static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index b7ee91daea0c..af736a44607c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -652,6 +652,7 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue) int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill); +int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable); int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 7563335b0c7f..b6ae26ba0a46 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -276,6 +276,11 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), vlan, is_kill); + } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { + struct 
hnae3_handle *handle = &vport->nic; + bool en = mbx_req->msg[2] ? true : false; + + status = hclge_en_hw_strip_rxvtag(handle, en); } if (gen_resp) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 2b8426412cc9..2dbffcef7109 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -830,6 +830,17 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); } +static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data; + + msg_data = enable ? 1 : 0; + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, + 1, false, NULL, 0); +} + static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -1825,6 +1836,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_tc_size = hclgevf_get_tc_size, .get_fw_version = hclgevf_get_fw_version, .set_vlan_filter = hclgevf_set_vlan_filter, + .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, .reset_event = hclgevf_reset_event, .get_channels = hclgevf_get_channels, .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, -- cgit v1.2.3 From 4dbbe8dde8485b89bce8bbbe7564337fd7eed69f Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 4 May 2018 10:01:38 +0100 Subject: net: stmmac: Add support for U32 TC filter using Flexible RX Parser This adds support for the U32 filter by using a HW-only feature called the Flexible RX Parser. This allows us to match any given packet field with a pattern and accept/reject or even route the packet to a specific DMA channel. Right now we only support acceptance or rejection of frames, and we only support simple rules. Though, the Parser has the flexibility of jumping to specific rules as an if condition, so complex rules can be established. This is only supported in GMAC5.10+. The following commands can be used to test this code: 1) Set up an ingress qdisc: # tc qdisc add dev eth0 handle ffff: ingress 2) Set up a filter (e.g. filter by IP): # tc filter add dev eth0 parent ffff: protocol ip u32 match ip \ src 192.168.0.3 skip_sw action drop In every test performed we always used the "skip_sw" flag to make sure only the RX Parser was involved. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Cc: Jakub Kicinski Signed-off-by: David S.
Miller --- drivers/net/ethernet/stmicro/stmmac/Makefile | 2 +- drivers/net/ethernet/stmicro/stmmac/common.h | 5 + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 4 + drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 1 + drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 3 + drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 195 ++++++++++++++ drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 13 + drivers/net/ethernet/stmicro/stmmac/hwif.c | 8 + drivers/net/ethernet/stmicro/stmmac/hwif.h | 25 +- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 29 +++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 59 +++++ drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 295 ++++++++++++++++++++++ 12 files changed, 636 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index e3b578b4f7cb..68e9e2640c62 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -5,7 +5,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ - $(stmmac-y) + stmmac_tc.o $(stmmac-y) # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 627e905b6d76..a679cb729d1d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -353,6 +353,10 @@ struct dma_features { unsigned int rx_fifo_size; /* Automotive Safety Package */ unsigned int asp; + /* RX Parser */ + unsigned int frpsel; + unsigned int frpbs; + unsigned int frpes; }; /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ @@ -412,6 +416,7 @@ struct mac_device_info { const struct stmmac_dma_ops *dma; const struct stmmac_mode_ops *mode; const struct stmmac_hwtimestamp *ptp; + const struct stmmac_tc_ops *tc; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; void __iomem *pcsr; /* vpointer to device CSRs */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 03eab9077c1c..6330a55953df 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -194,6 +194,9 @@ enum power_event { /* MAC HW features3 bitmap */ #define GMAC_HW_FEAT_ASP GENMASK(29, 28) +#define GMAC_HW_FEAT_FRPES GENMASK(14, 13) +#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11) +#define GMAC_HW_FEAT_FRPSEL BIT(10) /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) @@ -202,6 +205,7 @@ enum power_event { /* MTL registers */ #define MTL_OPERATION_MODE 0x00000c00 +#define MTL_FRPE BIT(15) #define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5) #define MTL_OPERATION_SCHALG_WRR (0x0 << 5) #define MTL_OPERATION_SCHALG_WFQ (0x1 << 5) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 7289b3b47d8e..a7121a7d9391 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -795,6 +795,7 @@ const struct stmmac_ops dwmac510_ops = { .safety_feat_config = dwmac5_safety_feat_config, .safety_feat_irq_status = dwmac5_safety_feat_irq_status, .safety_feat_dump = 
dwmac5_safety_feat_dump, + .rxp_config = dwmac5_rxp_config, }; int dwmac4_setup(struct stmmac_priv *priv) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d37d457306d1..117c3a5288f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -379,6 +379,9 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, /* 5.10 Features */ dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28; + dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13; + dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11; + dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10; } /* Enable/disable TSO feature and set MSS */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 2978550bb7f6..b2becb80a697 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -7,6 +7,7 @@ #include "common.h" #include "dwmac4.h" #include "dwmac5.h" +#include "stmmac.h" struct dwmac5_error_desc { bool valid; @@ -299,3 +300,197 @@ int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, *desc = dwmac5_all_errors[module].desc[offset].desc; return 0; } + +static int dwmac5_rxp_disable(void __iomem *ioaddr) +{ + u32 val; + int ret; + + val = readl(ioaddr + MTL_OPERATION_MODE); + val &= ~MTL_FRPE; + writel(val, ioaddr + MTL_OPERATION_MODE); + + ret = readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val, + val & RXPI, 1, 10000); + if (ret) + return ret; + return 0; +} + +static void dwmac5_rxp_enable(void __iomem *ioaddr) +{ + u32 val; + + val = readl(ioaddr + MTL_OPERATION_MODE); + val |= MTL_FRPE; + writel(val, ioaddr + MTL_OPERATION_MODE); +} + +static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr, + struct stmmac_tc_entry *entry, + int pos) +{ + int ret, i; + + for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) { + int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i; + u32 val; + + /* Wait for ready */ + ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, + val, !(val & STARTBUSY), 1, 10000); + if (ret) + return ret; + + /* Write data */ + val = *((u32 *)&entry->val + i); + writel(val, ioaddr + MTL_RXP_IACC_DATA); + + /* Write pos */ + val = real_pos & ADDR; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Write OP */ + val |= WRRDN; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Start Write */ + val |= STARTBUSY; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Wait for done */ + ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, + val, !(val & STARTBUSY), 1, 10000); + if (ret) + return ret; + } + + return 0; +} + +static struct stmmac_tc_entry * +dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count, + u32 curr_prio) +{ + struct stmmac_tc_entry *entry; + u32 min_prio = ~0x0; + int i, min_prio_idx; + bool found = false; + + for (i = count - 1; i >= 0; i--) { + entry = &entries[i]; + + /* Do not update unused entries */ + if (!entry->in_use) + continue; + /* Do not update already updated entries (i.e. 
fragments) */ + if (entry->in_hw) + continue; + /* Let last entry be updated last */ + if (entry->is_last) + continue; + /* Do not return fragments */ + if (entry->is_frag) + continue; + /* Check if we already checked this prio */ + if (entry->prio < curr_prio) + continue; + /* Check if this is the minimum prio */ + if (entry->prio < min_prio) { + min_prio = entry->prio; + min_prio_idx = i; + found = true; + } + } + + if (found) + return &entries[min_prio_idx]; + return NULL; +} + +int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count) +{ + struct stmmac_tc_entry *entry, *frag; + int i, ret, nve = 0; + u32 curr_prio = 0; + u32 old_val, val; + + /* Force disable RX */ + old_val = readl(ioaddr + GMAC_CONFIG); + val = old_val & ~GMAC_CONFIG_RE; + writel(val, ioaddr + GMAC_CONFIG); + + /* Disable RX Parser */ + ret = dwmac5_rxp_disable(ioaddr); + if (ret) + goto re_enable; + + /* Set all entries as NOT in HW */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + entry->in_hw = false; + } + + /* Update entries by reverse order */ + while (1) { + entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio); + if (!entry) + break; + + curr_prio = entry->prio; + frag = entry->frag_ptr; + + /* Set special fragment requirements */ + if (frag) { + entry->val.af = 0; + entry->val.rf = 0; + entry->val.nc = 1; + entry->val.ok_index = nve + 2; + } + + ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + entry->in_hw = true; + + if (frag && !frag->in_hw) { + ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve); + if (ret) + goto re_enable; + frag->table_pos = nve++; + frag->in_hw = true; + } + } + + if (!nve) + goto re_enable; + + /* Update all pass entry */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + if (!entry->is_last) + continue; + + ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + } + + /* Assume n. of parsable entries == n. 
of valid entries */ + val = (nve << 16) & NPE; + val |= nve & NVE; + writel(val, ioaddr + MTL_RXP_CONTROL_STATUS); + + /* Enable RX Parser */ + dwmac5_rxp_enable(ioaddr); + +re_enable: + /* Re-enable RX */ + writel(old_val, ioaddr + GMAC_CONFIG); + return ret; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index bd4c466d303e..cc810aff7100 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -11,6 +11,17 @@ #define PRTYEN BIT(1) #define TMOUTEN BIT(0) +#define MTL_RXP_CONTROL_STATUS 0x00000ca0 +#define RXPI BIT(31) +#define NPE GENMASK(23, 16) +#define NVE GENMASK(7, 0) +#define MTL_RXP_IACC_CTRL_STATUS 0x00000cb0 +#define STARTBUSY BIT(31) +#define RXPEIEC GENMASK(22, 21) +#define RXPEIEE BIT(20) +#define WRRDN BIT(16) +#define ADDR GENMASK(15, 0) +#define MTL_RXP_IACC_DATA 0x00000cb4 #define MTL_ECC_CONTROL 0x00000cc0 #define TSOEE BIT(4) #define MRXPEE BIT(3) @@ -48,5 +59,7 @@ int dwmac5_safety_feat_irq_status(struct net_device *ndev, struct stmmac_safety_stats *stats); int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, int index, unsigned long *count, const char **desc); +int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count); #endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 2b0a7e79de00..9acc8d2f1039 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -77,6 +77,7 @@ static const struct stmmac_hwif_entry { const void *mac; const void *hwtimestamp; const void *mode; + const void *tc; int (*setup)(struct stmmac_priv *priv); int (*quirks)(struct stmmac_priv *priv); } stmmac_hw[] = { @@ -90,6 +91,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac100_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, + .tc = NULL, .setup = dwmac100_setup, .quirks = stmmac_dwmac1_quirks, }, { @@ -101,6 +103,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac1000_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, + .tc = NULL, .setup = dwmac1000_setup, .quirks = stmmac_dwmac1_quirks, }, { @@ -112,6 +115,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac4_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, + .tc = NULL, .setup = dwmac4_setup, .quirks = stmmac_dwmac4_quirks, }, { @@ -123,6 +127,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, .mode = &dwmac4_ring_mode_ops, + .tc = NULL, .setup = dwmac4_setup, .quirks = NULL, }, { @@ -134,6 +139,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, .mode = &dwmac4_ring_mode_ops, + .tc = NULL, .setup = dwmac4_setup, .quirks = NULL, }, { @@ -145,6 +151,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac510_ops, .hwtimestamp = &stmmac_ptp, .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = NULL, } @@ -196,6 +203,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv) mac->mac = entry->mac; mac->ptp = entry->hwtimestamp; mac->mode = entry->mode; + mac->tc = entry->tc; priv->hw = mac; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index bfad61607f07..b7539a14e6ad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -5,10 +5,12 @@ #ifndef __STMMAC_HWIF_H__ #define __STMMAC_HWIF_H__ +#include + #define 
stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \ ({ \ int __result = -EINVAL; \ - if ((__priv)->hw->__module->__cname) { \ + if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \ (__priv)->hw->__module->__cname((__arg0), ##__args); \ __result = 0; \ } \ @@ -17,7 +19,7 @@ #define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) \ ({ \ int __result = -EINVAL; \ - if ((__priv)->hw->__module->__cname) \ + if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \ __result = (__priv)->hw->__module->__cname((__arg0), ##__args); \ __result; \ }) @@ -232,6 +234,7 @@ struct mac_device_info; struct net_device; struct rgmii_adv; struct stmmac_safety_stats; +struct stmmac_tc_entry; /* Helpers to program the MAC core */ struct stmmac_ops { @@ -301,6 +304,9 @@ struct stmmac_ops { struct stmmac_safety_stats *stats); int (*safety_feat_dump)(struct stmmac_safety_stats *stats, int index, unsigned long *count, const char **desc); + /* Flexible RX Parser */ + int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count); }; #define stmmac_core_init(__priv, __args...) \ @@ -365,6 +371,8 @@ struct stmmac_ops { stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args) #define stmmac_safety_feat_dump(__priv, __args...) \ stmmac_do_callback(__priv, mac, safety_feat_dump, __args) +#define stmmac_rxp_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, rxp_config, __args) /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { @@ -419,6 +427,18 @@ struct stmmac_mode_ops { stmmac_do_void_callback(__priv, mode, clean_desc3, __args) struct stmmac_priv; +struct tc_cls_u32_offload; + +struct stmmac_tc_ops { + int (*init)(struct stmmac_priv *priv); + int (*setup_cls_u32)(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls); +}; + +#define stmmac_tc_init(__priv, __args...) \ + stmmac_do_callback(__priv, tc, init, __args) +#define stmmac_tc_setup_cls_u32(__priv, __args...) 
\ + stmmac_do_callback(__priv, tc, setup_cls_u32, __args) extern const struct stmmac_ops dwmac100_ops; extern const struct stmmac_dma_ops dwmac100_dma_ops; @@ -429,6 +449,7 @@ extern const struct stmmac_dma_ops dwmac4_dma_ops; extern const struct stmmac_ops dwmac410_ops; extern const struct stmmac_dma_ops dwmac410_dma_ops; extern const struct stmmac_ops dwmac510_ops; +extern const struct stmmac_tc_ops dwmac510_tc_ops; #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 2443f20e07bf..42fc76e76bf9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -76,6 +76,30 @@ struct stmmac_rx_queue { struct napi_struct napi ____cacheline_aligned_in_smp; }; +struct stmmac_tc_entry { + bool in_use; + bool in_hw; + bool is_last; + bool is_frag; + void *frag_ptr; + unsigned int table_pos; + u32 handle; + u32 prio; + struct { + u32 match_data; + u32 match_en; + u8 af:1; + u8 rf:1; + u8 im:1; + u8 nc:1; + u8 res1:4; + u8 frame_offset; + u8 ok_index; + u8 dma_ch_no; + u32 res2; + } __packed val; +}; + struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ u32 tx_count_frames; @@ -151,6 +175,11 @@ struct stmmac_priv { unsigned long state; struct workqueue_struct *wq; struct work_struct service_task; + + /* TC Handling */ + unsigned int tc_entries_max; + unsigned int tc_off_max; + struct stmmac_tc_entry *tc_entries; }; enum stmmac_state { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b935478df55f..d9dbe1355896 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -45,6 +45,7 @@ #include #endif /* CONFIG_DEBUG_FS */ #include +#include #include "stmmac_ptp.h" #include "stmmac.h" #include @@ -3790,6 +3791,58 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return ret; } +static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct stmmac_priv *priv = cb_priv; + int ret = -EOPNOTSUPP; + + stmmac_disable_all_queues(priv); + + switch (type) { + case TC_SETUP_CLSU32: + if (tc_cls_can_offload_and_chain0(priv->dev, type_data)) + ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); + break; + default: + break; + } + + stmmac_enable_all_queues(priv); + return ret; +} + +static int stmmac_setup_tc_block(struct stmmac_priv *priv, + struct tc_block_offload *f) +{ + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, + priv, priv); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + switch (type) { + case TC_SETUP_BLOCK: + return stmmac_setup_tc_block(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + static int stmmac_set_mac_address(struct net_device *ndev, void *addr) { struct stmmac_priv *priv = netdev_priv(ndev); @@ -4028,6 +4081,7 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = 
stmmac_tx_timeout, .ndo_do_ioctl = stmmac_ioctl, + .ndo_setup_tc = stmmac_setup_tc, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = stmmac_poll_controller, #endif @@ -4227,6 +4281,11 @@ int stmmac_dvr_probe(struct device *device, ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + ret = stmmac_tc_init(priv, priv); + if (!ret) { + ndev->hw_features |= NETIF_F_HW_TC; + } + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; priv->tso = true; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c new file mode 100644 index 000000000000..881c94b73e2f --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac TC Handling (HW only) + */ + +#include +#include +#include "common.h" +#include "dwmac4.h" +#include "dwmac5.h" +#include "stmmac.h" + +static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry) +{ + memset(entry, 0, sizeof(*entry)); + entry->in_use = true; + entry->is_last = true; + entry->is_frag = false; + entry->prio = ~0x0; + entry->handle = 0; + entry->val.match_data = 0x0; + entry->val.match_en = 0x0; + entry->val.af = 1; + entry->val.dma_ch_no = 0x0; +} + +static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls, + bool free) +{ + struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL; + u32 loc = cls->knode.handle; + int i; + + for (i = 0; i < priv->tc_entries_max; i++) { + entry = &priv->tc_entries[i]; + if (!entry->in_use && !first && free) + first = entry; + if (entry->handle == loc && !free) + dup = entry; + } + + if (dup) + return dup; + if (first) { + first->handle = loc; + first->in_use = true; + + /* Reset HW values */ + memset(&first->val, 0, sizeof(first->val)); + } + + return first; +} + +static int tc_fill_actions(struct stmmac_tc_entry *entry, + struct stmmac_tc_entry *frag, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *action_entry = entry; + const struct tc_action *act; + struct tcf_exts *exts; + LIST_HEAD(actions); + + exts = cls->knode.exts; + if (!tcf_exts_has_actions(exts)) + return -EINVAL; + if (frag) + action_entry = frag; + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(act, &actions, list) { + /* Accept */ + if (is_tcf_gact_ok(act)) { + action_entry->val.af = 1; + break; + } + /* Drop */ + if (is_tcf_gact_shot(act)) { + action_entry->val.rf = 1; + break; + } + + /* Unsupported */ + return -EINVAL; + } + + return 0; +} + +static int tc_fill_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *entry, *frag = NULL; + struct tc_u32_sel *sel = cls->knode.sel; + u32 off, data, mask, real_off, rem; + u32 prio = cls->common.prio; + int ret; + + /* Only 1 match per entry */ + if (sel->nkeys <= 0 || sel->nkeys > 1) + return -EINVAL; + + off = sel->keys[0].off << sel->offshift; + data = sel->keys[0].val; + mask = sel->keys[0].mask; + + switch (ntohs(cls->common.protocol)) { + case ETH_P_ALL: + break; + case ETH_P_IP: + off += ETH_HLEN; + break; + default: + return -EINVAL; + } + + if (off > priv->tc_off_max) + return -EINVAL; + + real_off = off / 4; + rem = off % 4; + + entry = tc_find_entry(priv, cls, true); + if (!entry) + return -EINVAL; + + if (rem) { + frag = tc_find_entry(priv, cls, true); + if (!frag) { + ret = -EINVAL; + goto err_unuse; + } 
+ + entry->frag_ptr = frag; + entry->val.match_en = (mask << (rem * 8)) & + GENMASK(31, rem * 8); + entry->val.match_data = (data << (rem * 8)) & + GENMASK(31, rem * 8); + entry->val.frame_offset = real_off; + entry->prio = prio; + + frag->val.match_en = (mask >> (rem * 8)) & + GENMASK(rem * 8 - 1, 0); + frag->val.match_data = (data >> (rem * 8)) & + GENMASK(rem * 8 - 1, 0); + frag->val.frame_offset = real_off + 1; + frag->prio = prio; + frag->is_frag = true; + } else { + entry->frag_ptr = NULL; + entry->val.match_en = mask; + entry->val.match_data = data; + entry->val.frame_offset = real_off; + entry->prio = prio; + } + + ret = tc_fill_actions(entry, frag, cls); + if (ret) + goto err_unuse; + + return 0; + +err_unuse: + if (frag) + frag->in_use = false; + entry->in_use = false; + return ret; +} + +static void tc_unfill_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *entry; + + entry = tc_find_entry(priv, cls, false); + if (!entry) + return; + + entry->in_use = false; + if (entry->frag_ptr) { + entry = entry->frag_ptr; + entry->is_frag = false; + entry->in_use = false; + } +} + +static int tc_config_knode(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + int ret; + + ret = tc_fill_entry(priv, cls); + if (ret) + return ret; + + ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); + if (ret) + goto err_unfill; + + return 0; + +err_unfill: + tc_unfill_entry(priv, cls); + return ret; +} + +static int tc_delete_knode(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + int ret; + + /* Set entry and fragments as not used */ + tc_unfill_entry(priv, cls); + + ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); + if (ret) + return ret; + + return 0; +} + +static int tc_setup_cls_u32(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + switch (cls->command) { + case TC_CLSU32_REPLACE_KNODE: + tc_unfill_entry(priv, cls); + /* Fall through */ + case TC_CLSU32_NEW_KNODE: + return tc_config_knode(priv, cls); + case TC_CLSU32_DELETE_KNODE: + return tc_delete_knode(priv, cls); + default: + return -EOPNOTSUPP; + } +} + +static int tc_init(struct stmmac_priv *priv) +{ + struct dma_features *dma_cap = &priv->dma_cap; + unsigned int count; + + if (!dma_cap->frpsel) + return -EINVAL; + + switch (dma_cap->frpbs) { + case 0x0: + priv->tc_off_max = 64; + break; + case 0x1: + priv->tc_off_max = 128; + break; + case 0x2: + priv->tc_off_max = 256; + break; + default: + return -EINVAL; + } + + switch (dma_cap->frpes) { + case 0x0: + count = 64; + break; + case 0x1: + count = 128; + break; + case 0x2: + count = 256; + break; + default: + return -EINVAL; + } + + /* Reserve one last filter which lets all pass */ + priv->tc_entries_max = count; + priv->tc_entries = devm_kzalloc(priv->device, + sizeof(*priv->tc_entries) * count, GFP_KERNEL); + if (!priv->tc_entries) + return -ENOMEM; + + tc_fill_all_pass_entry(&priv->tc_entries[count - 1]); + + dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n", + priv->tc_entries_max, priv->tc_off_max); + return 0; +} + +const struct stmmac_tc_ops dwmac510_tc_ops = { + .init = tc_init, + .setup_cls_u32 = tc_setup_cls_u32, +}; -- cgit v1.2.3 From 5bafeb6e7e878ce41c834806239f8c629a59d40a Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Fri, 4 May 2018 19:26:10 +0200 Subject: net: dsa: mv88e6xxx: 88E6141/6341 SERDES support The 88E6141/6341 switches (also known as Topaz) have 1 SGMII lane, which can be configured the 
same way as the SERDES lane on 88E6390. Signed-off-by: Marek Behun Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 ++ drivers/net/dsa/mv88e6xxx/serdes.c | 20 ++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/serdes.h | 3 +++ 3 files changed, 25 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 9d62e4acc01b..e7e079b1888c 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2529,6 +2529,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .reset = mv88e6185_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_power = mv88e6341_serdes_power, }; static const struct mv88e6xxx_ops mv88e6095_ops = { @@ -2848,6 +2849,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_power = mv88e6341_serdes_power, }; static const struct mv88e6xxx_ops mv88e6176_ops = { diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index fb058fd35c0d..880b2cf0a530 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -326,3 +326,23 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) return 0; } + +int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) +{ + int err; + u8 cmode; + + if (port != 5) + return 0; + + err = mv88e6xxx_port_get_cmode(chip, port, &cmode); + if (err) + return err; + + if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) + return mv88e6390_serdes_sgmii(chip, MV88E6341_ADDR_SERDES, on); + + return 0; +} diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 1897c01c6e19..b6e5fbd46b5e 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -19,6 +19,8 @@ #define MV88E6352_ADDR_SERDES 0x0f #define MV88E6352_SERDES_PAGE_FIBER 0x01 +#define MV88E6341_ADDR_SERDES 0x15 + #define MV88E6390_PORT9_LANE0 0x09 #define MV88E6390_PORT9_LANE1 0x12 #define MV88E6390_PORT9_LANE2 0x13 @@ -42,6 +44,7 @@ #define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14) #define MV88E6390_SGMII_CONTROL_PDOWN BIT(11) +int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); -- cgit v1.2.3 From f663706a33ce32df69a2b3e85a80cb5a690f2234 Mon Sep 17 00:00:00 2001 From: Kirill Tkhai Date: Tue, 8 May 2018 19:21:34 +0300 Subject: tun: Do SIOCGSKNS out of rtnl_lock() Since net ns of tun device is assigned on the device creation, and it never changes, we do not need to use any lock to get it from alive tun. Signed-off-by: Kirill Tkhai Signed-off-by: David S. 
Miller --- drivers/net/tun.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d3c04ab9752a..44d4f3d25350 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2850,10 +2850,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { struct tun_file *tfile = file->private_data; + struct net *net = sock_net(&tfile->sk); struct tun_struct *tun; void __user* argp = (void __user*)arg; struct ifreq ifr; - struct net *net; kuid_t owner; kgid_t group; int sndbuf; @@ -2877,14 +2877,18 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, */ return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, (unsigned int __user*)argp); - } else if (cmd == TUNSETQUEUE) + } else if (cmd == TUNSETQUEUE) { return tun_set_queue(file, &ifr); + } else if (cmd == SIOCGSKNS) { + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + return open_related_ns(&net->ns, get_net_ns); + } ret = 0; rtnl_lock(); tun = tun_get(tfile); - net = sock_net(&tfile->sk); if (cmd == TUNSETIFF) { ret = -EEXIST; if (tun) @@ -2914,14 +2918,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, tfile->ifindex = ifindex; goto unlock; } - if (cmd == SIOCGSKNS) { - ret = -EPERM; - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) - goto unlock; - - ret = open_related_ns(&net->ns, get_net_ns); - goto unlock; - } ret = -EBADFD; if (!tun) -- cgit v1.2.3 From e5c9a705452acf59efe00cb84f086624fda010ab Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Wed, 9 May 2018 18:29:02 +0300 Subject: net/mlx4_core: Report driver version to FW If supported, write a driver version string to FW as part of the INIT_HCA command. Example of driver version: "Linux,mlx4_core,4.0-0" Signed-off-by: Eran Ben Elisha Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 12 ++++++++++++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2 +- include/linux/mlx4/device.h | 1 + 3 files changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index de6b3d416148..46dcbfbe4c5e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -165,6 +165,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [36] = "QinQ VST mode support", [37] = "sl to vl mapping table change event support", [38] = "user MAC support", + [39] = "Report driver version to FW support", }; int i; @@ -1038,6 +1039,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; if (field32 & (1 << 7)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; + if (field32 & (1 << 8)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW; MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT); if (field32 & (1 << 17)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT; @@ -1860,6 +1863,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 +#define INIT_HCA_DRIVER_VERSION_OFFSET 0x140 +#define INIT_HCA_DRIVER_VERSION_SZ 0x40 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) @@ -1950,6 +1955,13 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) { + u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4); + + strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1); + mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst); + } + /* QPC/EEC/CQC/EQC/RDMARC attributes */ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index c68da1986e51..cb9e923e8399 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -55,8 +55,8 @@ #include "fw_qos.h" #define DRV_NAME "mlx4_core" -#define PFX DRV_NAME ": " #define DRV_VERSION "4.0-0" +#define DRV_NAME_FOR_FW "Linux," DRV_NAME "," DRV_VERSION #define MLX4_FS_UDP_UC_EN (1 << 1) #define MLX4_FS_TCP_UC_EN (1 << 2) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 81d0799b6091..122e7e9d3091 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -225,6 +225,7 @@ enum { MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36, MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37, MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38, + MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39, }; enum { -- cgit v1.2.3 From 86a3e5d02c2006aaf085c940f037d2453ac44957 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Wed, 9 May 2018 18:29:03 +0300 Subject: net/mlx4_core: Add PCI calls for suspend/resume Implement suspend/resume callbacks in struct pci_driver. 
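For context, the legacy PCI power-management entry points used by this patch have the following shape. The sketch below is illustrative only (the driver name and the load/unload helpers are placeholders, not mlx4 symbols); it shows how such callbacks are typically wired into struct pci_driver. A complete driver would also set .id_table, .probe and .remove, as the mlx4 code below does.

#include <linux/module.h>
#include <linux/pci.h>

/* Placeholder teardown/bring-up helpers standing in for a driver's own. */
static void example_unload_one(struct pci_dev *pdev) { }
static int example_load_one(struct pci_dev *pdev) { return 0; }

/* Legacy suspend hook: quiesce the device before the system sleeps. */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	example_unload_one(pdev);
	return 0;
}

/* Legacy resume hook: bring the device back up after wakeup. */
static int example_resume(struct pci_dev *pdev)
{
	return example_load_one(pdev);
}

static struct pci_driver example_pci_driver = {
	.name    = "example_pci",
	.suspend = example_suspend,
	.resume  = example_resume,
};
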
Signed-off-by: Yishai Hadas Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 46 +++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 211578ffc70d..b6aaf34d6648 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4125,12 +4125,58 @@ static const struct pci_error_handlers mlx4_err_handler = { .resume = mlx4_pci_resume, }; +static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + + mlx4_err(dev, "suspend was called\n"); + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + mutex_unlock(&persist->interface_state_mutex); + + return 0; +} + +static int mlx4_resume(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int total_vfs; + int ret = 0; + + mlx4_err(dev, "resume was called\n"); + total_vfs = dev->persist->num_vfs; + memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); + + mutex_lock(&persist->interface_state_mutex); + if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { + ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, + nvfs, priv, 1); + if (!ret) { + ret = restore_current_port_types(dev, + dev->persist->curr_port_type, + dev->persist->curr_port_poss_type); + if (ret) + mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret); + } + } + mutex_unlock(&persist->interface_state_mutex); + + return ret; +} + static struct pci_driver mlx4_driver = { .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, .shutdown = mlx4_shutdown, .remove = mlx4_remove_one, + .suspend = mlx4_suspend, + .resume = mlx4_resume, .err_handler = &mlx4_err_handler, }; -- cgit v1.2.3 From e57328384b6a9148df653216203408de3be3dda1 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 9 May 2018 18:29:04 +0300 Subject: net/mlx4_core: Use msi_x module param to limit num of MSI-X irqs Extend the boolean interpretation of msi_x module parameter to numerical, as follows: 0 - Don't use MSI-X. 1 - Use MSI-X, driver decides the num of MSI-X irqs. >=2 - Use MSI-X, limit number of MSI-X irqs to msi_x. In SRIOV, this limits the number of MSI-X irqs per VF. Signed-off-by: Tariq Toukan Cc: Ajaykumar Hotchandani Reviewed-by: Ajaykumar Hotchandani Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index b6aaf34d6648..80a75c80a463 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -73,7 +73,7 @@ MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); static int msi_x = 1; module_param(msi_x, int, 0444); -MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); +MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x"); #else /* CONFIG_PCI_MSI */ @@ -2815,6 +2815,9 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) dev->caps.num_eqs - dev->caps.reserved_eqs, MAX_MSIX); + if (msi_x > 1) + nreq = min_t(int, nreq, msi_x); + entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); if (!entries) goto no_msi; @@ -4182,6 +4185,11 @@ static struct pci_driver mlx4_driver = { static int __init mlx4_verify_params(void) { + if (msi_x < 0) { + pr_warn("mlx4_core: bad msi_x: %d\n", msi_x); + return -1; + } + if ((log_num_mac < 0) || (log_num_mac > 7)) { pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); return -1; -- cgit v1.2.3 From 02317e68369b0feecc18e791016d38a8e4df35f7 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 9 May 2018 11:38:49 -0400 Subject: net: dsa: mv88e6xxx: add a cascade port op Only the 88E6185 family has bits 15:12 Cascade Port bits in the Global Control 2 register. Hence inconsistent values are actually written in this register for other families. Add a .set_cascade_port operation to isolate the 88E6185 case, and call it from the device mapping setup function. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 10 +++++++++- drivers/net/dsa/mv88e6xxx/chip.h | 6 ++++++ drivers/net/dsa/mv88e6xxx/global1.c | 23 +++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/global1.h | 7 +++++-- 4 files changed, 43 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index e7e079b1888c..9b61d5b65b0f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1046,6 +1046,13 @@ static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip) return err; } + if (chip->info->ops->set_cascade_port) { + port = MV88E6XXX_CASCADE_PORT_MULTIPLE; + err = chip->info->ops->set_cascade_port(chip, port); + if (err) + return err; + } + return 0; } @@ -2158,7 +2165,6 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) /* Disable remote management, and set the switch's DSA device number. 
*/ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, - MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE | (ds->index & 0x1f)); if (err) return err; @@ -2643,6 +2649,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, + .set_cascade_port = mv88e6185_g1_set_cascade_port, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, @@ -2911,6 +2918,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, + .set_cascade_port = mv88e6185_g1_set_cascade_port, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 4163c8099d0b..62234c2287a2 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -401,6 +401,12 @@ struct mv88e6xxx_ops { uint64_t *data); int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port); int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port); + +#define MV88E6XXX_CASCADE_PORT_NONE 0xe +#define MV88E6XXX_CASCADE_PORT_MULTIPLE 0xf + + int (*set_cascade_port)(struct mv88e6xxx_chip *chip, int port); + const struct mv88e6xxx_irq_ops *watchdog_ops; int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index b43bd6476632..6eb4eca7ca5b 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -350,6 +350,29 @@ int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) /* Offset 0x1c: Global Control 2 */ +static int mv88e6xxx_g1_ctl2_mask(struct mv88e6xxx_chip *chip, u16 mask, + u16 val) +{ + u16 reg; + int err; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, ®); + if (err) + return err; + + reg &= ~mask; + reg |= val & mask; + + return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, reg); +} + +int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port) +{ + const u16 mask = MV88E6185_G1_CTL2_CASCADE_PORT_MASK; + + return mv88e6xxx_g1_ctl2_mask(chip, mask, port << __bf_shf(mask)); +} + int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip) { u16 val; diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 6aee7316fea6..bcbb8046ad63 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -201,11 +201,12 @@ /* Offset 0x1C: Global Control 2 */ #define MV88E6XXX_G1_CTL2 0x1c -#define MV88E6XXX_G1_CTL2_NO_CASCADE 0xe000 -#define MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE 0xf000 #define MV88E6XXX_G1_CTL2_HIST_RX 0x0040 #define MV88E6XXX_G1_CTL2_HIST_TX 0x0080 #define MV88E6XXX_G1_CTL2_HIST_RX_TX 0x00c0 +#define MV88E6185_G1_CTL2_CASCADE_PORT_MASK 0xf000 +#define MV88E6185_G1_CTL2_CASCADE_PORT_NONE 0xe000 +#define MV88E6185_G1_CTL2_CASCADE_PORT_MULTI 0xf000 /* Offset 0x1D: Stats Operation Register */ #define MV88E6XXX_G1_STATS_OP 0x1d @@ -253,6 +254,8 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port); + int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all); 
int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip, unsigned int msecs); -- cgit v1.2.3 From 23c9891996959dbde59333b8ed997d66b44fb3ff Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 9 May 2018 11:38:50 -0400 Subject: net: dsa: mv88e6xxx: set device number All Marvell switches supported by mv88e6xxx have to set their device number in the Global Control 2 register. Extract this in a read then write function, called from the device mapping setup code. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 11 ++++------- drivers/net/dsa/mv88e6xxx/global1.c | 7 +++++++ drivers/net/dsa/mv88e6xxx/global1.h | 3 +++ 3 files changed, 14 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 9b61d5b65b0f..ef753ed89b30 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1053,6 +1053,10 @@ static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip) return err; } + err = mv88e6xxx_g1_set_device_number(chip, chip->ds->index); + if (err) + return err; + return 0; } @@ -2160,15 +2164,8 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) { - struct dsa_switch *ds = chip->ds; int err; - /* Disable remote management, and set the switch's DSA device number. */ - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, - (ds->index & 0x1f)); - if (err) - return err; - /* Configure the IP ToS mapping registers. */ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000); if (err) diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 6eb4eca7ca5b..89c6330c53eb 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -389,6 +389,13 @@ int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip) return err; } +int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index) +{ + return mv88e6xxx_g1_ctl2_mask(chip, + MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK, + index); +} + /* Offset 0x1d: Statistics Operation 2 */ int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip) diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index bcbb8046ad63..8b043e813761 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -207,6 +207,7 @@ #define MV88E6185_G1_CTL2_CASCADE_PORT_MASK 0xf000 #define MV88E6185_G1_CTL2_CASCADE_PORT_NONE 0xe000 #define MV88E6185_G1_CTL2_CASCADE_PORT_MULTI 0xf000 +#define MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK 0x001f /* Offset 0x1D: Stats Operation Register */ #define MV88E6XXX_G1_STATS_OP 0x1d @@ -256,6 +257,8 @@ int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port); +int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index); + int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all); int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip, unsigned int msecs); -- cgit v1.2.3 From 9e5baf9b363673a8f78508c99a9d815da6ea7133 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 9 May 2018 11:38:51 -0400 Subject: net: dsa: mv88e6xxx: add RMU disable op The RMU mode bits moved a lot within the Global Control 2 register of the Marvell switch families. Add an .rmu_disable op to support at least 3 known alternatives. 
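The implementation below boils down to one read-modify-write helper on Global Control 2, wrapped by per-family functions that differ only in which bits they touch. As a rough, self-contained sketch of that pattern (register access is stubbed out in memory and the masks shown are only illustrative, not taken from a datasheet):

#include <linux/types.h>

struct example_chip {
	u16 ctl2;	/* stand-in for the real Global Control 2 register */
};

/* Stubbed accessors; a real switch driver would go over MDIO/SMI here. */
static int example_g1_read(struct example_chip *chip, u16 *val)
{
	*val = chip->ctl2;
	return 0;
}

static int example_g1_write(struct example_chip *chip, u16 val)
{
	chip->ctl2 = val;
	return 0;
}

/* Shared read-modify-write: clear the field, then set the new value. */
static int example_g1_ctl2_mask(struct example_chip *chip, u16 mask, u16 val)
{
	u16 reg;
	int err;

	err = example_g1_read(chip, &reg);
	if (err)
		return err;

	reg &= ~mask;
	reg |= val & mask;

	return example_g1_write(chip, reg);
}

/* Per-family wrappers: only the field location and disabled encoding differ. */
static int example_family_a_rmu_disable(struct example_chip *chip)
{
	return example_g1_ctl2_mask(chip, 0x3000, 0x0000);	/* illustrative field */
}

static int example_family_b_rmu_disable(struct example_chip *chip)
{
	return example_g1_ctl2_mask(chip, 0x0700, 0x0700);	/* illustrative field */
}
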
Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 24 ++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/chip.h | 3 +++ drivers/net/dsa/mv88e6xxx/global1.c | 18 ++++++++++++++++++ drivers/net/dsa/mv88e6xxx/global1.h | 20 ++++++++++++++++++++ 4 files changed, 65 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index ef753ed89b30..5d933549dc49 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1069,6 +1069,14 @@ static int mv88e6xxx_trunk_setup(struct mv88e6xxx_chip *chip) return 0; } +static int mv88e6xxx_rmu_setup(struct mv88e6xxx_chip *chip) +{ + if (chip->info->ops->rmu_disable) + return chip->info->ops->rmu_disable(chip); + + return 0; +} + static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip) { if (chip->info->ops->pot_clear) @@ -2263,6 +2271,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; + err = mv88e6xxx_rmu_setup(chip); + if (err) + goto unlock; + err = mv88e6xxx_rsvd2cpu_setup(chip); if (err) goto unlock; @@ -2530,6 +2542,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, + .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6341_serdes_power, @@ -2588,6 +2601,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; @@ -2815,6 +2829,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, @@ -2888,6 +2903,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, @@ -2953,6 +2969,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -2989,6 +3006,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3025,6 +3043,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, 
.serdes_power = mv88e6390_serdes_power, @@ -3062,6 +3081,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, @@ -3100,6 +3120,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3316,6 +3337,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, @@ -3359,6 +3381,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3399,6 +3422,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 62234c2287a2..a1bedb0a888b 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -432,6 +432,9 @@ struct mv88e6xxx_ops { /* Interface to the AVB/PTP registers */ const struct mv88e6xxx_avb_ops *avb_ops; + + /* Remote Management Unit operations */ + int (*rmu_disable)(struct mv88e6xxx_chip *chip); }; struct mv88e6xxx_irq_ops { diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 89c6330c53eb..244ee1ff9edc 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -373,6 +373,24 @@ int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port) return mv88e6xxx_g1_ctl2_mask(chip, mask, port << __bf_shf(mask)); } +int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g1_ctl2_mask(chip, MV88E6085_G1_CTL2_P10RM | + MV88E6085_G1_CTL2_RM_ENABLE, 0); +} + +int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g1_ctl2_mask(chip, MV88E6352_G1_CTL2_RMU_MODE_MASK, + MV88E6352_G1_CTL2_RMU_MODE_DISABLED); +} + +int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_RMU_MODE_MASK, + MV88E6390_G1_CTL2_RMU_MODE_DISABLED); +} + int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip) { u16 val; diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 8b043e813761..e186a026e1b1 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -207,6 +207,22 @@ #define MV88E6185_G1_CTL2_CASCADE_PORT_MASK 0xf000 #define 
MV88E6185_G1_CTL2_CASCADE_PORT_NONE 0xe000 #define MV88E6185_G1_CTL2_CASCADE_PORT_MULTI 0xf000 +#define MV88E6352_G1_CTL2_RMU_MODE_MASK 0x3000 +#define MV88E6352_G1_CTL2_RMU_MODE_DISABLED 0x0000 +#define MV88E6352_G1_CTL2_RMU_MODE_PORT_4 0x1000 +#define MV88E6352_G1_CTL2_RMU_MODE_PORT_5 0x2000 +#define MV88E6352_G1_CTL2_RMU_MODE_PORT_6 0x3000 +#define MV88E6085_G1_CTL2_DA_CHECK 0x4000 +#define MV88E6085_G1_CTL2_P10RM 0x2000 +#define MV88E6085_G1_CTL2_RM_ENABLE 0x1000 +#define MV88E6352_G1_CTL2_DA_CHECK 0x0800 +#define MV88E6390_G1_CTL2_RMU_MODE_MASK 0x0700 +#define MV88E6390_G1_CTL2_RMU_MODE_PORT_0 0x0000 +#define MV88E6390_G1_CTL2_RMU_MODE_PORT_1 0x0100 +#define MV88E6390_G1_CTL2_RMU_MODE_PORT_9 0x0200 +#define MV88E6390_G1_CTL2_RMU_MODE_PORT_10 0x0300 +#define MV88E6390_G1_CTL2_RMU_MODE_ALL_DSA 0x0600 +#define MV88E6390_G1_CTL2_RMU_MODE_DISABLED 0x0700 #define MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK 0x001f /* Offset 0x1D: Stats Operation Register */ @@ -257,6 +273,10 @@ int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port); +int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip); +int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip); +int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip); + int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index); int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all); -- cgit v1.2.3 From 54472edff0afa09e35828b2b81b00695a9da1c17 Mon Sep 17 00:00:00 2001 From: Sekhar Nori Date: Wed, 9 May 2018 21:15:15 +0530 Subject: drivers: net: davinci_mdio: prevent spurious timeout A well timed kernel preemption in the time_after() loop in wait_for_idle() can result in a spurious timeout error to be returned. Fix it by using readl_poll_timeout() which takes care of this issue. Reviewed-by: Andrew Lunn Signed-off-by: Sekhar Nori Reviewed-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_mdio.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 3c33f4504d8e..98a1c97fb95e 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -227,14 +228,14 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) static inline int wait_for_idle(struct davinci_mdio_data *data) { struct davinci_mdio_regs __iomem *regs = data->regs; - unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); + u32 val, ret; - while (time_after(timeout, jiffies)) { - if (__raw_readl(®s->control) & CONTROL_IDLE) - return 0; - } - dev_err(data->dev, "timed out waiting for idle\n"); - return -ETIMEDOUT; + ret = readl_poll_timeout(®s->control, val, val & CONTROL_IDLE, + 0, MDIO_TIMEOUT * 1000); + if (ret) + dev_err(data->dev, "timed out waiting for idle\n"); + + return ret; } static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) -- cgit v1.2.3 From bfcbcb67e9611cd27da84810f09c57cbe89aa687 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 9 May 2018 09:00:07 -0700 Subject: hv_netvsc: typo in NDIS RSS parameters structure Fix simple misspelling kashkey_offset should be hashkey_offset. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 2 +- drivers/net/hyperv/rndis_filter.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 6ebe39a3dde6..1be34d2e3563 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -110,7 +110,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */ u16 hashkey_size; /* The offset of the secret key from the beginning of this structure */ - u32 kashkey_offset; + u32 hashkey_offset; u32 processor_masks_offset; u32 num_processor_masks; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 3b6dbacaf77d..3cb2c4666b2c 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -752,7 +752,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, rssp->indirect_tabsize = 4*ITAB_NUM; rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); rssp->hashkey_size = NETVSC_HASH_KEYLEN; - rssp->kashkey_offset = rssp->indirect_taboffset + + rssp->hashkey_offset = rssp->indirect_taboffset + rssp->indirect_tabsize; /* Set indirection table entries */ @@ -761,7 +761,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, itab[i] = rdev->rx_table[i]; /* Set hask key values */ - keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); + keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset); memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN); ret = rndis_filter_send_request(rdev, request); -- cgit v1.2.3 From f09555ffe379aeef7fa83c459bbe29e8bce8ec50 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 9 May 2018 17:24:38 +0100 Subject: net: hns3: Fix for setting mac address when resetting When hns3_init_mac_addr is called during reset process, it will get the mac address from NCL_CONFIG and set it to hardware. If user has changed the mac address, then the mac address set by user is lost during resetting. This patch fixes it by not getting the mac address from NCL_CONFIG when resetting. Fixes: 424eb834a9be ("net: hns3: Unified HNS3 {VF|PF} Ethernet Driver for hip08 SoC") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 729bcab947df..a55c8f515e99 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3046,13 +3046,13 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) } /* Set mac addr if it is configured. 
or leave it to the AE driver */ -static void hns3_init_mac_addr(struct net_device *netdev) +static void hns3_init_mac_addr(struct net_device *netdev, bool init) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; u8 mac_addr_temp[ETH_ALEN]; - if (h->ae_algo->ops->get_mac_addr) { + if (h->ae_algo->ops->get_mac_addr && init) { h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); ether_addr_copy(netdev->dev_addr, mac_addr_temp); } @@ -3106,7 +3106,7 @@ static int hns3_client_init(struct hnae3_handle *handle) handle->kinfo.netdev = netdev; handle->priv = (void *)priv; - hns3_init_mac_addr(netdev); + hns3_init_mac_addr(netdev, true); hns3_set_default_feature(netdev); @@ -3353,7 +3353,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_init_mac_addr(netdev); + hns3_init_mac_addr(netdev, false); hns3_nic_set_rx_mode(netdev); hns3_recover_hw_addr(netdev); -- cgit v1.2.3 From 0a78a1dfd17a05d377115d6352d12d5507a27994 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 9 May 2018 17:24:39 +0100 Subject: net: hns3: remove add/del_tunnel_udp in hns3_enet module The add/del_tunnel_udp is not implemented in hclge_main moulde, the NETIF_F_RX_UDP_TUNNEL_PORT feature bit is added automatically by stack when ndo_udp_tunnel_add is not null in dev->netdev_ops. This patch removes the add/del_tunnel_udp related function, for we do not support this feature now. Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 7 -- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 89 ------------------------- 2 files changed, 96 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 37ec1b3286c6..804ea83e1713 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -273,10 +273,6 @@ struct hnae3_ae_dev { * Map rings to vector * unmap_ring_from_vector() * Unmap rings from vector - * add_tunnel_udp() - * Add tunnel information to hardware - * del_tunnel_udp() - * Delete tunnel information from hardware * reset_queue() * Reset queue * get_fw_version() @@ -388,9 +384,6 @@ struct hnae3_ae_ops { int vector_num, struct hnae3_ring_chain_node *vr_chain); - int (*add_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); - int (*del_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); - void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id); u32 (*get_fw_version)(struct hnae3_handle *handle); void (*get_mdix_mode)(struct hnae3_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index a55c8f515e99..bd8e14b53889 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1244,93 +1244,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev, stats->tx_compressed = netdev->stats.tx_compressed; } -static void hns3_add_tunnel_port(struct net_device *netdev, u16 port, - enum hns3_udp_tnl_type type) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; - struct hnae3_handle *h = priv->ae_handle; - - if (udp_tnl->used && udp_tnl->dst_port == port) { - udp_tnl->used++; - return; - } - - if (udp_tnl->used) { - netdev_warn(netdev, - "UDP tunnel [%d], port [%d] 
offload\n", type, port); - return; - } - - udp_tnl->dst_port = port; - udp_tnl->used = 1; - /* TBD send command to hardware to add port */ - if (h->ae_algo->ops->add_tunnel_udp) - h->ae_algo->ops->add_tunnel_udp(h, port); -} - -static void hns3_del_tunnel_port(struct net_device *netdev, u16 port, - enum hns3_udp_tnl_type type) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; - struct hnae3_handle *h = priv->ae_handle; - - if (!udp_tnl->used || udp_tnl->dst_port != port) { - netdev_warn(netdev, - "Invalid UDP tunnel port %d\n", port); - return; - } - - udp_tnl->used--; - if (udp_tnl->used) - return; - - udp_tnl->dst_port = 0; - /* TBD send command to hardware to del port */ - if (h->ae_algo->ops->del_tunnel_udp) - h->ae_algo->ops->del_tunnel_udp(h, port); -} - -/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports - * @netdev: This physical ports's netdev - * @ti: Tunnel information - */ -static void hns3_nic_udp_tunnel_add(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - u16 port_n = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); - break; - default: - netdev_err(netdev, "unsupported tunnel type %d\n", ti->type); - break; - } -} - -static void hns3_nic_udp_tunnel_del(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - u16 port_n = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); - break; - default: - break; - } -} - static int hns3_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; @@ -1569,8 +1482,6 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_get_stats64 = hns3_nic_get_stats64, .ndo_setup_tc = hns3_nic_setup_tc, .ndo_set_rx_mode = hns3_nic_set_rx_mode, - .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add, - .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del, .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, -- cgit v1.2.3 From beebca3a911051a5f4d359ed64b0375a83d42490 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 9 May 2018 17:24:40 +0100 Subject: net: hns3: fix for cleaning ring problem The head or tail in hardware is not longer valid when resetting, current hns3_clear_all_ring use them to clean the ring, which will cause problem during resetting. This patch fixes it by using next_to_use and next_to_clean in the ring struct. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 34 ++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index bd8e14b53889..4031174181a0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3203,9 +3203,35 @@ static void hns3_recover_hw_addr(struct net_device *ndev) hns3_nic_mc_sync(ndev, ha->addr); } -static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb) +static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { - dev_kfree_skb_any(skb); + if (!HNAE3_IS_TX_RING(ring)) + return; + + while (ring->next_to_clean != ring->next_to_use) { + hns3_free_buffer_detach(ring, ring->next_to_clean); + ring_ptr_move_fw(ring, next_to_clean); + } +} + +static void hns3_clear_rx_ring(struct hns3_enet_ring *ring) +{ + if (HNAE3_IS_TX_RING(ring)) + return; + + while (ring->next_to_use != ring->next_to_clean) { + /* When a buffer is not reused, it's memory has been + * freed in hns3_handle_rx_bd or will be freed by + * stack, so only need to unmap the buffer here. + */ + if (!ring->desc_cb[ring->next_to_use].reuse_flag) { + hns3_unmap_buffer(ring, + &ring->desc_cb[ring->next_to_use]); + ring->desc_cb[ring->next_to_use].dma = 0; + } + + ring_ptr_move_fw(ring, next_to_use); + } } static void hns3_clear_all_ring(struct hnae3_handle *h) @@ -3219,13 +3245,13 @@ static void hns3_clear_all_ring(struct hnae3_handle *h) struct hns3_enet_ring *ring; ring = priv->ring_data[i].ring; - hns3_clean_tx_ring(ring, ring->desc_num); + hns3_clear_tx_ring(ring); dev_queue = netdev_get_tx_queue(ndev, priv->ring_data[i].queue_index); netdev_tx_reset_queue(dev_queue); ring = priv->ring_data[i + h->kinfo.num_tqps].ring; - hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data); + hns3_clear_rx_ring(ring); } } -- cgit v1.2.3 From e4d68dae43fbed0132472061d9f1ab01ef4e3efe Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 9 May 2018 17:24:41 +0100 Subject: net: hns3: refactor the loopback related function This patch refactors the loopback related function in order to support the serdes loopback. Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 21 +++---- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 68 +++++++++++----------- 2 files changed, 42 insertions(+), 47 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index eb3c34f3cf87..c16bb6cb0564 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -74,7 +74,7 @@ struct hns3_link_mode_mapping { u32 ethtool_link_mode; }; -static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) +static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) { struct hnae3_handle *h = hns3_get_handle(ndev); int ret; @@ -85,11 +85,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) switch (loop) { case HNAE3_MAC_INTER_LOOP_MAC: - ret = h->ae_algo->ops->set_loopback(h, loop, true); - break; - case HNAE3_MAC_LOOP_NONE: - ret = h->ae_algo->ops->set_loopback(h, - HNAE3_MAC_INTER_LOOP_MAC, false); + ret = h->ae_algo->ops->set_loopback(h, loop, en); break; default: ret = -ENOTSUPP; @@ -99,10 +95,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) if (ret) return ret; - if (loop == HNAE3_MAC_LOOP_NONE) - h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC); - else - h->ae_algo->ops->set_promisc_mode(h, 1); + h->ae_algo->ops->set_promisc_mode(h, en); return ret; } @@ -122,13 +115,13 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) return ret; } - ret = hns3_lp_setup(ndev, loop_mode); + ret = hns3_lp_setup(ndev, loop_mode, true); usleep_range(10000, 20000); return ret; } -static int hns3_lp_down(struct net_device *ndev) +static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) { struct hnae3_handle *h = hns3_get_handle(ndev); int ret; @@ -136,7 +129,7 @@ static int hns3_lp_down(struct net_device *ndev) if (!h->ae_algo->ops->stop) return -EOPNOTSUPP; - ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE); + ret = hns3_lp_setup(ndev, loop_mode, false); if (ret) { netdev_err(ndev, "lb_setup return error: %d\n", ret); return ret; @@ -332,7 +325,7 @@ static void hns3_self_test(struct net_device *ndev, data[test_index] = hns3_lp_up(ndev, loop_type); if (!data[test_index]) { data[test_index] = hns3_lp_run_test(ndev, loop_type); - hns3_lp_down(ndev); + hns3_lp_down(ndev, loop_type); } if (data[test_index]) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 084b90437e23..316ec8427891 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3682,48 +3682,50 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) "mac enable fail, ret =%d.\n", ret); } -static int hclge_set_loopback(struct hnae3_handle *handle, - enum hnae3_loop loop_mode, bool en) +static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) { - struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_config_mac_mode_cmd *req; - struct hclge_dev *hdev = vport->back; struct hclge_desc desc; u32 loop_en; int ret; - switch (loop_mode) { - case HNAE3_MAC_INTER_LOOP_MAC: - req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; - /* 1 Read out the MAC mode config at first */ - hclge_cmd_setup_basic_desc(&desc, - HCLGE_OPC_CONFIG_MAC_MODE, - true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if 
(ret) { - dev_err(&hdev->pdev->dev, - "mac loopback get fail, ret =%d.\n", - ret); - return ret; - } + req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; + /* 1 Read out the MAC mode config at first */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback get fail, ret =%d.\n", ret); + return ret; + } - /* 2 Then setup the loopback flag */ - loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); - if (en) - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1); - else - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); + /* 2 Then setup the loopback flag */ + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); - req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); - /* 3 Config mac work mode with loopback flag - * and its original configure parameters - */ - hclge_cmd_reuse_desc(&desc, false); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "mac loopback set fail, ret =%d.\n", ret); + /* 3 Config mac work mode with loopback flag + * and its original configure parameters + */ + hclge_cmd_reuse_desc(&desc, false); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac loopback set fail, ret =%d.\n", ret); + return ret; +} + +static int hclge_set_loopback(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + switch (loop_mode) { + case HNAE3_MAC_INTER_LOOP_MAC: + ret = hclge_set_mac_loopback(hdev, en); break; default: ret = -ENOTSUPP; -- cgit v1.2.3 From f60475761838478ececbc3d47ab385808441a0a0 Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Wed, 9 May 2018 11:31:31 -0700 Subject: liquidio: monitor all of Octeon's cores in watchdog thread The liquidio_watchdog kernel thread is watching over only 12 cores of the Octeon CN23XX; it's neglecting the other 4 cores that are present in the CN2360. Fix it by defining LIO_MAX_CORES as 16. Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_network.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index dd3177a526d2..d7a3916fe877 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -192,7 +192,7 @@ struct lio { #define LIO_SIZE (sizeof(struct lio)) #define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev)) -#define LIO_MAX_CORES 12 +#define LIO_MAX_CORES 16 /** * \brief Enable or disable feature -- cgit v1.2.3 From 7d0870f661e11b9248c87919bd2ce61ba6a67a82 Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Wed, 9 May 2018 11:49:38 -0700 Subject: liquidio: bump up driver version to 1.7.2 to match newer NIC firmware Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 285b24836974..690424b6781a 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -28,7 +28,7 @@ #define LIQUIDIO_PACKAGE "" #define LIQUIDIO_BASE_MAJOR_VERSION 1 #define LIQUIDIO_BASE_MINOR_VERSION 7 -#define LIQUIDIO_BASE_MICRO_VERSION 0 +#define LIQUIDIO_BASE_MICRO_VERSION 2 #define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \ __stringify(LIQUIDIO_BASE_MINOR_VERSION) #define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION) -- cgit v1.2.3 From 419476624c89905977aa945aea2f94f7d942caf5 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 10 May 2018 13:13:04 +0300 Subject: mlxsw: reg: Add MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH Add MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH to support VLAN-encapsulated port mirroring. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6218231e379e..3f4d7e22cece 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -6833,6 +6833,12 @@ enum mlxsw_reg_mpat_span_type { */ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0, + /* Remote SPAN Ethernet VLAN. + * The packet is forwarded to the monitoring port on the monitoring + * VLAN. + */ + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH = 0x1, + /* Encapsulated Remote SPAN Ethernet L3 GRE. * The packet is encapsulated with GRE header. */ -- cgit v1.2.3 From e00698d1d7957669002a39c039423921d8694033 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 10 May 2018 13:13:05 +0300 Subject: mlxsw: spectrum_span: Support mirror-to-VLAN Offload "tc action mirred mirror" to a device that is a vlan device on top of a front-panel port device. The hardware encapsulates the mirrored packets in a VLAN tag. That includes the case that the mirrored traffic is already VLAN-tagged--in that case the monitor traffic will be double-tagged, just like in the software path. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_span.c | 64 ++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index cd9071ee19ad..d90582ee478f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -235,6 +235,14 @@ mlxsw_sp_span_entry_bridge(const struct net_device *br_dev, return dev; } +static struct net_device * +mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev, + u16 *p_vid) +{ + *p_vid = vlan_dev_vlan_id(vlan_dev); + return vlan_dev_real_dev(vlan_dev); +} + static __maybe_unused int mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, union mlxsw_sp_l3addr saddr, @@ -477,6 +485,61 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = { }; #endif +static bool +mlxsw_sp_span_vlan_can_handle(const struct net_device *dev) +{ + return is_vlan_dev(dev) && + mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev)); +} + +static int +mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + struct net_device *real_dev; + u16 vid; + + if (!(to_dev->flags & IFF_UP)) + return mlxsw_sp_span_entry_unoffloadable(sparmsp); + + real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid); + sparmsp->dest_port = netdev_priv(real_dev); + sparmsp->vid = vid; + return 0; +} + +static int +mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_port *dest_port = sparms.dest_port; + struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; + u8 local_port = dest_port->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH); + mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure_common(span_entry, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH); +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = { + .can_handle = mlxsw_sp_span_vlan_can_handle, + .parms = mlxsw_sp_span_entry_vlan_parms, + .configure = mlxsw_sp_span_entry_vlan_configure, + .deconfigure = mlxsw_sp_span_entry_vlan_deconfigure, +}; + static const struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { &mlxsw_sp_span_entry_ops_phys, @@ -486,6 +549,7 @@ struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { #if IS_ENABLED(CONFIG_IPV6_GRE) &mlxsw_sp_span_entry_ops_gretap6, #endif + &mlxsw_sp_span_entry_ops_vlan, }; static int -- cgit v1.2.3 From 03c44132393e821fb4fbd75b9c9798f13c46eca6 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 10 May 2018 13:13:06 +0300 Subject: mlxsw: spectrum_span: Support VLAN under mirror-to-gretap When mirroring to a gretap or ip6gretap device, allow the underlay packet path to include VLAN devices. The following configurations are supported in underlay: - vlan over phys - vlan-unaware bridge where the egress device is vlan over phys - vlan over vlan-aware bridge where the egress device is phys Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_span.c | 27 +++++++++++++++------- 1 file changed, 19 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index d90582ee478f..3b77990df599 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -176,21 +176,23 @@ mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev, { struct bridge_vlan_info vinfo; struct net_device *edev; - u16 pvid; + u16 vid = *p_vid; - if (WARN_ON(br_vlan_get_pvid(br_dev, &pvid))) + if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid))) return NULL; - if (!pvid) + if (!vid || + br_vlan_get_info(br_dev, vid, &vinfo) || + !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY)) return NULL; - edev = br_fdb_find_port(br_dev, dmac, pvid); + edev = br_fdb_find_port(br_dev, dmac, vid); if (!edev) return NULL; - if (br_vlan_get_info(edev, pvid, &vinfo)) + if (br_vlan_get_info(edev, vid, &vinfo)) return NULL; if (!(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)) - *p_vid = pvid; + *p_vid = vid; return edev; } @@ -208,13 +210,13 @@ mlxsw_sp_span_entry_bridge(const struct net_device *br_dev, { struct mlxsw_sp_bridge_port *bridge_port; enum mlxsw_reg_spms_state spms_state; + struct net_device *dev = NULL; struct mlxsw_sp_port *port; - struct net_device *dev; u8 stp_state; if (br_vlan_enabled(br_dev)) dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid); - else + else if (!*p_vid) dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac); if (!dev) return NULL; @@ -261,12 +263,21 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, if (!l3edev || mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac)) goto unoffloadable; + if (is_vlan_dev(l3edev)) + l3edev = mlxsw_sp_span_entry_vlan(l3edev, &vid); + if (netif_is_bridge_master(l3edev)) { l3edev = mlxsw_sp_span_entry_bridge(l3edev, dmac, &vid); if (!l3edev) goto unoffloadable; } + if (is_vlan_dev(l3edev)) { + if (vid || !(l3edev->flags & IFF_UP)) + goto unoffloadable; + l3edev = mlxsw_sp_span_entry_vlan(l3edev, &vid); + } + if (!mlxsw_sp_port_dev_check(l3edev)) goto unoffloadable; -- cgit v1.2.3 From b3c594ab6fcf7a91453442755deb1f3941eeed64 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 10 May 2018 16:07:23 +0530 Subject: cxgb4: fix the wrong conversion of Mbps to Kbps fix the wrong conversion where 1 Mbps was converted to 1024 Kbps. Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 24d2865b8806..1efcc85692f7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2886,13 +2886,13 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) } /* Convert from Mbps to Kbps */ - req_rate = rate << 10; + req_rate = rate * 1000; /* Max rate is 100 Gbps */ - if (req_rate >= SCHED_MAX_RATE_KBPS) { + if (req_rate > SCHED_MAX_RATE_KBPS) { dev_err(adap->pdev_dev, "Invalid rate %u Mbps, Max rate is %u Mbps\n", - rate, SCHED_MAX_RATE_KBPS >> 10); + rate, SCHED_MAX_RATE_KBPS / 1000); return -ERANGE; } -- cgit v1.2.3 From ec9efb523cb8daf7b9d2e5c9cb80b255b716a777 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 10 May 2018 15:29:46 +0200 Subject: rocker: Postpone filtering of !added_by_user FDB Breaking out of the switch in rocker_switchdev_event() still ends up scheduling work, except an ill-defined one. This leads to an OOPS cited below. Fix by postponing the check until rocker_switchdev_event_work(). [ 23.148476] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 [ 23.148810] PGD 0 P4D 0 [ 23.148982] Oops: 0000 [#1] PREEMPT SMP PTI [ 23.149190] Modules linked in: bridge stp llc iptable_nat nf_nat_ipv4 nf_nat e1000 rocker [ 23.149768] CPU: 0 PID: 239 Comm: kworker/u2:4 Not tainted 4.17.0-rc3-net_next_queue-custom #6 [ 23.150298] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-2.fc27 04/01/2014 [ 23.150868] Workqueue: rocker rocker_switchdev_event_work [rocker] [ 23.151258] RIP: 0010:ofdpa_port_fdb+0x7b/0x230 [rocker] [ 23.151597] RSP: 0018:ffffc900004b3e18 EFLAGS: 00010246 [ 23.151952] RAX: 00000000fffbc68c RBX: 0000000000000000 RCX: 0000000000000000 [ 23.152363] RDX: 0000000000000010 RSI: ffff88003b4471e0 RDI: 00000000ffffffff [ 23.152768] RBP: ffff88003b4471c0 R08: ffff88003b4471e0 R09: ffff88003b4471c0 [ 23.153141] R10: 0000000000000000 R11: 0000000000000000 R12: ffff880036caf000 [ 23.153515] R13: 0000000000000000 R14: 0000000000000000 R15: ffff88003bc00000 [ 23.153919] FS: 0000000000000000(0000) GS:ffff88003fc00000(0000) knlGS:0000000000000000 [ 23.154444] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 23.154806] CR2: 0000000000000000 CR3: 0000000036eb6000 CR4: 00000000000006f0 [ 23.155194] Call Trace: [ 23.155472] rocker_switchdev_event_work+0x9b/0xd0 [rocker] [ 23.155850] ? __schedule+0x231/0x700 [ 23.156175] process_one_work+0x1cf/0x3e0 [ 23.156490] worker_thread+0x26/0x3d0 [ 23.156795] ? trace_event_raw_event_workqueue_execute_start+0x80/0x80 [ 23.157181] kthread+0x10e/0x130 [ 23.157485] ? kthread_create_worker_on_cpu+0x40/0x40 [ 23.157824] ret_from_fork+0x35/0x40 [ 23.158174] Code: 00 00 c1 e8 02 4c 8d 45 20 bf ff ff ff ff 83 e0 01 4c 89 65 20 88 45 14 48 8b 05 21 da 1f e2 4c 89 c6 4c 89 44 24 10 48 89 45 18 <41> 8b 45 00 89 45 28 41 0f b7 45 04 66 89 45 2c 0f b7 44 24 04 [ 23.159346] RIP: ofdpa_port_fdb+0x7b/0x230 [rocker] RSP: ffffc900004b3e18 [ 23.159742] CR2: 0000000000000000 [ 23.160088] ---[ end trace f9b16d4cb6df0629 ]--- Fixes: 816a3bed9549 ("switchdev: Add fdb.added_by_user to switchdev notifications") Suggested-by: Vivien Didelot Signed-off-by: Petr Machata Reviewed-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 152d6948611c..e73e4febeedb 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2738,6 +2738,8 @@ static void rocker_switchdev_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; + if (!fdb_info->added_by_user) + break; err = rocker_world_port_fdb_add(rocker_port, fdb_info); if (err) { netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err); @@ -2747,6 +2749,8 @@ static void rocker_switchdev_event_work(struct work_struct *work) break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; + if (!fdb_info->added_by_user) + break; err = rocker_world_port_fdb_del(rocker_port, fdb_info); if (err) netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err); @@ -2783,8 +2787,6 @@ static int rocker_switchdev_event(struct notifier_block *unused, switch (event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ case SWITCHDEV_FDB_DEL_TO_DEVICE: - if (!fdb_info->added_by_user) - break; memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); -- cgit v1.2.3 From bb322a90383603aaf71ad882a7c5a48b39a02603 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 10 May 2018 13:17:29 -0700 Subject: net: phy: phylink: Use gpiod_get_value_cansleep() The GPIO provider for the link GPIO line might require the use of the _cansleep() API, utilize that. This is safe to do since we run in workqueue context. Fixes: 9525ae83959b ("phylink: add phylink infrastructure") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index c582b2d7546c..412d1cf4fa66 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -360,7 +360,7 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat if (pl->get_fixed_state) pl->get_fixed_state(pl->netdev, state); else if (pl->link_gpio) - state->link = !!gpiod_get_value(pl->link_gpio); + state->link = !!gpiod_get_value_cansleep(pl->link_gpio); } /* Flow control is resolved according to our and the link partners -- cgit v1.2.3 From daab3349ad1a69663ccad278ed71d55974d104b4 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 10 May 2018 13:17:30 -0700 Subject: net: phy: phylink: Release link GPIO We are not releasing the link GPIO descriptor with gpiod_put() which results in subsequent probing to get -EBUSY when calling fwnode_get_named_gpiod(). Fix this by doing the release in phylink_destroy(). Fixes: 9525ae83959b ("phylink: add phylink infrastructure") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 412d1cf4fa66..6392b5248cf5 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -612,6 +612,8 @@ void phylink_destroy(struct phylink *pl) { if (pl->sfp_bus) sfp_unregister_upstream(pl->sfp_bus); + if (!IS_ERR(pl->link_gpio)) + gpiod_put(pl->link_gpio); cancel_work_sync(&pl->resolve); kfree(pl); -- cgit v1.2.3 From 9cd00a8aa42e44a5a3f555e4999803856a7871f5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 10 May 2018 13:17:31 -0700 Subject: net: phy: phylink: Poll link GPIOs When using a fixed link with a link GPIO, we need to poll that GPIO to determine link state changes. This is consistent with what fixed_phy.c does. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 6392b5248cf5..581ce93ecaf9 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "sfp.h" @@ -54,6 +55,7 @@ struct phylink { /* The link configuration settings */ struct phylink_link_state link_config; struct gpio_desc *link_gpio; + struct timer_list link_poll; void (*get_fixed_state)(struct net_device *dev, struct phylink_link_state *s); @@ -500,6 +502,15 @@ static void phylink_run_resolve(struct phylink *pl) queue_work(system_power_efficient_wq, &pl->resolve); } +static void phylink_fixed_poll(struct timer_list *t) +{ + struct phylink *pl = container_of(t, struct phylink, link_poll); + + mod_timer(t, jiffies + HZ); + + phylink_run_resolve(pl); +} + static const struct sfp_upstream_ops sfp_phylink_ops; static int phylink_register_sfp(struct phylink *pl, @@ -572,6 +583,7 @@ struct phylink *phylink_create(struct net_device *ndev, pl->link_config.an_enabled = true; pl->ops = ops; __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + timer_setup(&pl->link_poll, phylink_fixed_poll, 0); bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS); linkmode_copy(pl->link_config.advertising, pl->supported); @@ -905,6 +917,8 @@ void phylink_start(struct phylink *pl) clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); phylink_run_resolve(pl); + if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) + mod_timer(&pl->link_poll, jiffies + HZ); if (pl->sfp_bus) sfp_upstream_start(pl->sfp_bus); if (pl->phydev) @@ -929,6 +943,8 @@ void phylink_stop(struct phylink *pl) phy_stop(pl->phydev); if (pl->sfp_bus) sfp_upstream_stop(pl->sfp_bus); + if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) + del_timer_sync(&pl->link_poll); set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); queue_work(system_power_efficient_wq, &pl->resolve); -- cgit v1.2.3 From bc0cb653a3f06b5a1ee9f12cc1c367a8736aca28 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 10 May 2018 13:17:33 -0700 Subject: net: dsa: bcm_sf2: Implement phylink_mac_ops Make the bcm_sf2 driver implement phylink_mac_ops since it needs to support a wide variety of network interfaces: internal & external MDIO PHYs, fixed PHYs, MoCA with MMIO link status. 
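As a quick map of where these hooks land before the large diff that follows: the sketch below condenses the dsa_switch_ops wiring from the hunk at the end of that diff; the inline comments are editorial summaries of each callback's role, not text from the patch, and all other members of the ops table are elided.

static const struct dsa_switch_ops bcm_sf2_ops = {
	/* ... existing callbacks ... */
	.phylink_validate	= bcm_sf2_sw_validate,	    /* restrict link modes to MII/RevMII/GMII/RGMII/internal/MoCA */
	.phylink_mac_config	= bcm_sf2_sw_mac_config,    /* program port mode, pause and forced speed/duplex/link */
	.phylink_mac_link_down	= bcm_sf2_sw_mac_link_down, /* disable the RGMII block to conserve power on link loss */
	.phylink_mac_link_up	= bcm_sf2_sw_mac_link_up,   /* re-enable the RGMII block and (re)initialize EEE */
	.phylink_fixed_state	= bcm_sf2_sw_fixed_state,   /* report link state for fixed-link/MoCA ports */
	/* ... */
};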
A large amount of what needs to be done already exists under bcm_sf2_sw_adjust_link() so we are essentially breaking this down into the necessary operation for PHYLINK to work: mac_config, mac_link_up, mac_link_down and validate. We can now entirely get rid of most of what fixed_link_update() provided because only the link information is actually necessary. We still have to force DUPLEX_FULL for legacy Device Tree bindings that did not specify that before. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 214 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 206 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 97236cfcbae4..a20608b0329e 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -306,7 +307,8 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum, static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) { - struct bcm_sf2_priv *priv = dev_id; + struct dsa_switch *ds = dev_id; + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & ~priv->irq0_mask; @@ -317,16 +319,21 @@ static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id) { - struct bcm_sf2_priv *priv = dev_id; + struct dsa_switch *ds = dev_id; + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & ~priv->irq1_mask; intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); - if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) - priv->port_sts[7].link = 1; - if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) - priv->port_sts[7].link = 0; + if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) { + priv->port_sts[7].link = true; + dsa_port_phylink_mac_change(ds, 7, true); + } + if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) { + priv->port_sts[7].link = false; + dsa_port_phylink_mac_change(ds, 7, false); + } return IRQ_HANDLED; } @@ -620,6 +627,192 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, status->pause = 1; } +static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_GMII && + state->interface != PHY_INTERFACE_MODE_INTERNAL && + state->interface != PHY_INTERFACE_MODE_MOCA) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, + "Unsupported interface: %d\n", state->interface); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of MII and Reverse MII, we support Gigabit, + * including Half duplex + */ + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + bitmap_and(supported, supported, mask, + 
__ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + u32 id_mode_dis = 0, port_mode; + u32 reg, offset; + + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + else + offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); + + switch (state->interface) { + case PHY_INTERFACE_MODE_RGMII: + id_mode_dis = 1; + /* fallthrough */ + case PHY_INTERFACE_MODE_RGMII_TXID: + port_mode = EXT_GPHY; + break; + case PHY_INTERFACE_MODE_MII: + port_mode = EXT_EPHY; + break; + case PHY_INTERFACE_MODE_REVMII: + port_mode = EXT_REVMII; + break; + default: + /* all other PHYs: internal and MoCA */ + goto force_link; + } + + /* Clear id_mode_dis bit, and the existing port mode, let + * RGMII_MODE_EN bet set by mac_link_{up,down} + */ + reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg &= ~ID_MODE_DIS; + reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); + reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); + + reg |= port_mode; + if (id_mode_dis) + reg |= ID_MODE_DIS; + + if (state->pause & MLO_PAUSE_TXRX_MASK) { + if (state->pause & MLO_PAUSE_TX) + reg |= TX_PAUSE_EN; + reg |= RX_PAUSE_EN; + } + + reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + +force_link: + /* Force link settings detected from the PHY */ + reg = SW_OVERRIDE; + switch (state->speed) { + case SPEED_1000: + reg |= SPDSTS_1000 << SPEED_SHIFT; + break; + case SPEED_100: + reg |= SPDSTS_100 << SPEED_SHIFT; + break; + } + + if (state->link) + reg |= LINK_STS; + if (state->duplex == DUPLEX_FULL) + reg |= DUPLX_MODE; + + core_writel(priv, reg, offset); +} + +static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port, + phy_interface_t interface, bool link) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + u32 reg; + + if (!phy_interface_mode_is_rgmii(interface) && + interface != PHY_INTERFACE_MODE_MII && + interface != PHY_INTERFACE_MODE_REVMII) + return; + + /* If the link is down, just disable the interface to conserve power */ + reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + if (link) + reg |= RGMII_MODE_EN; + else + reg &= ~RGMII_MODE_EN; + reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); +} + +static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + bcm_sf2_sw_mac_link_set(ds, port, interface, false); +} + +static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct ethtool_eee *p = &priv->dev->ports[port].eee; + + bcm_sf2_sw_mac_link_set(ds, port, interface, true); + + if (mode == MLO_AN_PHY && phydev) + p->eee_enabled = b53_eee_init(ds, port, phydev); +} + +static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port, + struct phylink_link_state *status) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + + status->link = false; + + /* MoCA port is special as we do not get link status from CORE_LNKSTS, + * which means that we need to force the link at the port override + * level to get the data to flow. We do use what the interrupt handler + * did determine before. + * + * For the other ports, we just force the link status, since this is + * a fixed PHY device. 
+ */ + if (port == priv->moca_port) { + status->link = priv->port_sts[port].link; + /* For MoCA interfaces, also force a link down notification + * since some version of the user-space daemon (mocad) use + * cmd->autoneg to force the link, which messes up the PHY + * state machine and make it go in PHY_FORCING state instead. + */ + if (!status->link) + netif_carrier_off(ds->ports[port].slave); + status->duplex = DUPLEX_FULL; + } else { + status->link = true; + } +} + static void bcm_sf2_enable_acb(struct dsa_switch *ds) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); @@ -863,6 +1056,11 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .get_phy_flags = bcm_sf2_sw_get_phy_flags, .adjust_link = bcm_sf2_sw_adjust_link, .fixed_link_update = bcm_sf2_sw_fixed_link_update, + .phylink_validate = bcm_sf2_sw_validate, + .phylink_mac_config = bcm_sf2_sw_mac_config, + .phylink_mac_link_down = bcm_sf2_sw_mac_link_down, + .phylink_mac_link_up = bcm_sf2_sw_mac_link_up, + .phylink_fixed_state = bcm_sf2_sw_fixed_state, .suspend = bcm_sf2_sw_suspend, .resume = bcm_sf2_sw_resume, .get_wol = bcm_sf2_sw_get_wol, @@ -1065,14 +1263,14 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) bcm_sf2_intr_disable(priv); ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0, - "switch_0", priv); + "switch_0", ds); if (ret < 0) { pr_err("failed to request switch_0 IRQ\n"); goto out_mdio; } ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0, - "switch_1", priv); + "switch_1", ds); if (ret < 0) { pr_err("failed to request switch_1 IRQ\n"); goto out_mdio; -- cgit v1.2.3 From c9a2356f35409a667429254bc326b10f92c7ecce Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 10 May 2018 13:17:35 -0700 Subject: net: dsa: mv88e6xxx: add PHYLINK support Add rudimentary phylink support to mv88e6xxx. This allows the driver using user ports with fixed links to keep operating normally. User ports with normal PHYs are not affected since the switch automatically manages their link parameters. User facing ports which use a SFP/SFF with a non-fixed link mode might require a call to phylink_mac_change() to operate properly. Signed-off-by: Russell King [Andrew: fixed link setting after adding link polling] Signed-off-by: Andrew Lunn [florian: expand commit message] Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 83 ++++++++++++++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/port.c | 39 +++++++++++++++++++ drivers/net/dsa/mv88e6xxx/port.h | 3 ++ 3 files changed, 125 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5d933549dc49..1cebde80b101 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "chip.h" @@ -580,6 +581,83 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to configure MAC\n", port); } +static void mv88e6xxx_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ +} + +static int mv88e6xxx_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_link_state(chip, port, state); + mutex_unlock(&chip->reg_lock); + + return err; +} + +static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int speed, duplex, link, err; + + if (mode == MLO_AN_PHY) + return; + + if (mode == MLO_AN_FIXED) { + link = LINK_FORCED_UP; + speed = state->speed; + duplex = state->duplex; + } else { + speed = SPEED_UNFORCED; + duplex = DUPLEX_UNFORCED; + link = LINK_UNFORCED; + } + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, + state->interface); + mutex_unlock(&chip->reg_lock); + + if (err && err != -EOPNOTSUPP) + dev_err(ds->dev, "p%d: failed to configure MAC\n", port); +} + +static void mv88e6xxx_mac_link_force(struct dsa_switch *ds, int port, int link) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mutex_lock(&chip->reg_lock); + err = chip->info->ops->port_set_link(chip, port, link); + mutex_unlock(&chip->reg_lock); + + if (err) + dev_err(chip->dev, "p%d: failed to force MAC link\n", port); +} + +static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + if (mode == MLO_AN_FIXED) + mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_DOWN); +} + +static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, phy_interface_t interface, + struct phy_device *phydev) +{ + if (mode == MLO_AN_FIXED) + mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_UP); +} + static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port) { if (!chip->info->ops->stats_snapshot) @@ -4169,6 +4247,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_tag_protocol = mv88e6xxx_get_tag_protocol, .setup = mv88e6xxx_setup, .adjust_link = mv88e6xxx_adjust_link, + .phylink_validate = mv88e6xxx_validate, + .phylink_mac_link_state = mv88e6xxx_link_state, + .phylink_mac_config = mv88e6xxx_mac_config, + .phylink_mac_link_down = mv88e6xxx_mac_link_down, + .phylink_mac_link_up = mv88e6xxx_mac_link_up, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 6315774d72b3..429d0ebcd5b1 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "chip.h" 
#include "port.h" @@ -378,6 +379,44 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) return 0; } +int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port, + struct phylink_link_state *state) +{ + int err; + u16 reg; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); + if (err) + return err; + + switch (reg & MV88E6XXX_PORT_STS_SPEED_MASK) { + case MV88E6XXX_PORT_STS_SPEED_10: + state->speed = SPEED_10; + break; + case MV88E6XXX_PORT_STS_SPEED_100: + state->speed = SPEED_100; + break; + case MV88E6XXX_PORT_STS_SPEED_1000: + state->speed = SPEED_1000; + break; + case MV88E6XXX_PORT_STS_SPEED_10000: + if ((reg &MV88E6XXX_PORT_STS_CMODE_MASK) == + MV88E6XXX_PORT_STS_CMODE_2500BASEX) + state->speed = SPEED_2500; + else + state->speed = SPEED_10000; + break; + } + + state->duplex = reg & MV88E6XXX_PORT_STS_DUPLEX ? + DUPLEX_FULL : DUPLEX_HALF; + state->link = !!(reg & MV88E6XXX_PORT_STS_LINK); + state->an_enabled = 1; + state->an_complete = state->link; + + return 0; +} + /* Offset 0x02: Jamming Control * * Do not limit the period of time that this port can be paused for by diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index b16d5f0e6e9c..5e1db1b221ca 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -29,6 +29,7 @@ #define MV88E6XXX_PORT_STS_SPEED_10 0x0000 #define MV88E6XXX_PORT_STS_SPEED_100 0x0100 #define MV88E6XXX_PORT_STS_SPEED_1000 0x0200 +#define MV88E6XXX_PORT_STS_SPEED_10000 0x0300 #define MV88E6352_PORT_STS_EEE 0x0040 #define MV88E6165_PORT_STS_AM_DIS 0x0040 #define MV88E6185_PORT_STS_MGMII 0x0040 @@ -295,6 +296,8 @@ int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); +int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port, + struct phylink_link_state *state); int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, int upstream_port); -- cgit v1.2.3 From 58d56fcc3964f9be0a9ca42fd126bcd9dc7afc90 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 10 May 2018 13:17:37 -0700 Subject: net: dsa: bcm_sf2: Get rid of PHYLIB functions Now that we have converted the bcm_sf2 driver to implement PHYLINK MAC operations, we can remove the PHYLIB callbacks: adjust_link() and fixed_link_update() which are no longer called by DSA. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 149 ---------------------------------------------- 1 file changed, 149 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index a20608b0329e..ac621f44237a 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -480,153 +480,6 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) return priv->hw_params.gphy_rev; } -static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) -{ - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_eee *p = &priv->dev->ports[port].eee; - u32 id_mode_dis = 0, port_mode; - const char *str = NULL; - u32 reg, offset; - - if (priv->type == BCM7445_DEVICE_ID) - offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); - else - offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); - - switch (phydev->interface) { - case PHY_INTERFACE_MODE_RGMII: - str = "RGMII (no delay)"; - id_mode_dis = 1; - case PHY_INTERFACE_MODE_RGMII_TXID: - if (!str) - str = "RGMII (TX delay)"; - port_mode = EXT_GPHY; - break; - case PHY_INTERFACE_MODE_MII: - str = "MII"; - port_mode = EXT_EPHY; - break; - case PHY_INTERFACE_MODE_REVMII: - str = "Reverse MII"; - port_mode = EXT_REVMII; - break; - default: - /* All other PHYs: internal and MoCA */ - goto force_link; - } - - /* If the link is down, just disable the interface to conserve power */ - if (!phydev->link) { - reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); - reg &= ~RGMII_MODE_EN; - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); - goto force_link; - } - - /* Clear id_mode_dis bit, and the existing port mode, but - * make sure we enable the RGMII block for data to pass - */ - reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); - reg &= ~ID_MODE_DIS; - reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); - reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); - - reg |= port_mode | RGMII_MODE_EN; - if (id_mode_dis) - reg |= ID_MODE_DIS; - - if (phydev->pause) { - if (phydev->asym_pause) - reg |= TX_PAUSE_EN; - reg |= RX_PAUSE_EN; - } - - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); - - pr_info("Port %d configured for %s\n", port, str); - -force_link: - /* Force link settings detected from the PHY */ - reg = SW_OVERRIDE; - switch (phydev->speed) { - case SPEED_1000: - reg |= SPDSTS_1000 << SPEED_SHIFT; - break; - case SPEED_100: - reg |= SPDSTS_100 << SPEED_SHIFT; - break; - } - - if (phydev->link) - reg |= LINK_STS; - if (phydev->duplex == DUPLEX_FULL) - reg |= DUPLX_MODE; - - core_writel(priv, reg, offset); - - if (!phydev->is_pseudo_fixed_link) - p->eee_enabled = b53_eee_init(ds, port, phydev); -} - -static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, - struct fixed_phy_status *status) -{ - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 duplex, pause, offset; - u32 reg; - - if (priv->type == BCM7445_DEVICE_ID) - offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); - else - offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); - - duplex = core_readl(priv, CORE_DUPSTS); - pause = core_readl(priv, CORE_PAUSESTS); - - status->link = 0; - - /* MoCA port is special as we do not get link status from CORE_LNKSTS, - * which means that we need to force the link at the port override - * level to get the data to flow. We do use what the interrupt handler - * did determine before. - * - * For the other ports, we just force the link status, since this is - * a fixed PHY device. 
- */ - if (port == priv->moca_port) { - status->link = priv->port_sts[port].link; - /* For MoCA interfaces, also force a link down notification - * since some version of the user-space daemon (mocad) use - * cmd->autoneg to force the link, which messes up the PHY - * state machine and make it go in PHY_FORCING state instead. - */ - if (!status->link) - netif_carrier_off(ds->ports[port].slave); - status->duplex = 1; - } else { - status->link = 1; - status->duplex = !!(duplex & (1 << port)); - } - - reg = core_readl(priv, offset); - reg |= SW_OVERRIDE; - if (status->link) - reg |= LINK_STS; - else - reg &= ~LINK_STS; - core_writel(priv, reg, offset); - - if ((pause & (1 << port)) && - (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { - status->asym_pause = 1; - status->pause = 1; - } - - if (pause & (1 << port)) - status->pause = 1; -} - static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) @@ -1054,8 +907,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .get_sset_count = b53_get_sset_count, .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, .get_phy_flags = bcm_sf2_sw_get_phy_flags, - .adjust_link = bcm_sf2_sw_adjust_link, - .fixed_link_update = bcm_sf2_sw_fixed_link_update, .phylink_validate = bcm_sf2_sw_validate, .phylink_mac_config = bcm_sf2_sw_mac_config, .phylink_mac_link_down = bcm_sf2_sw_mac_link_down, -- cgit v1.2.3 From fc74ecbc293cba27fd8adee7a18fbda2f8d9e285 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 11 May 2018 11:57:30 +0300 Subject: mlxsw: spectrum_span: Rename misnamed variable l3edev Calling the variable l3edev was relevant when neighbor lookup was the last stage in the simulated pipeline. Now that mlxsw handles bridges and vlan devices as well, calling it "L3" is a misnomer. Thus in mlxsw_sp_span_dmac(), rename to "dev", because that function is just a service routine where the distinction between tunnel and egress device isn't necessary. In mlxsw_sp_span_entry_tunnel_parms_common(), rename to "edev" to emphasize that the routine traces packet egress. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_span.c | 32 +++++++++++----------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 3b77990df599..adf1f789b166 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -137,14 +137,14 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = { static int mlxsw_sp_span_dmac(struct neigh_table *tbl, const void *pkey, - struct net_device *l3edev, + struct net_device *dev, unsigned char dmac[ETH_ALEN]) { - struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev); + struct neighbour *neigh = neigh_lookup(tbl, pkey, dev); int err = 0; if (!neigh) { - neigh = neigh_create(tbl, pkey, l3edev); + neigh = neigh_create(tbl, pkey, dev); if (IS_ERR(neigh)) return PTR_ERR(neigh); } @@ -246,7 +246,7 @@ mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev, } static __maybe_unused int -mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, +mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev, union mlxsw_sp_l3addr saddr, union mlxsw_sp_l3addr daddr, union mlxsw_sp_l3addr gw, @@ -260,31 +260,31 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, if (mlxsw_sp_l3addr_is_zero(gw)) gw = daddr; - if (!l3edev || mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac)) + if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac)) goto unoffloadable; - if (is_vlan_dev(l3edev)) - l3edev = mlxsw_sp_span_entry_vlan(l3edev, &vid); + if (is_vlan_dev(edev)) + edev = mlxsw_sp_span_entry_vlan(edev, &vid); - if (netif_is_bridge_master(l3edev)) { - l3edev = mlxsw_sp_span_entry_bridge(l3edev, dmac, &vid); - if (!l3edev) + if (netif_is_bridge_master(edev)) { + edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid); + if (!edev) goto unoffloadable; } - if (is_vlan_dev(l3edev)) { - if (vid || !(l3edev->flags & IFF_UP)) + if (is_vlan_dev(edev)) { + if (vid || !(edev->flags & IFF_UP)) goto unoffloadable; - l3edev = mlxsw_sp_span_entry_vlan(l3edev, &vid); + edev = mlxsw_sp_span_entry_vlan(edev, &vid); } - if (!mlxsw_sp_port_dev_check(l3edev)) + if (!mlxsw_sp_port_dev_check(edev)) goto unoffloadable; - sparmsp->dest_port = netdev_priv(l3edev); + sparmsp->dest_port = netdev_priv(edev); sparmsp->ttl = ttl; memcpy(sparmsp->dmac, dmac, ETH_ALEN); - memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN); + memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN); sparmsp->saddr = saddr; sparmsp->daddr = daddr; sparmsp->vid = vid; -- cgit v1.2.3 From c41c0dd7a641cb28f7239e7086419741af78c448 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 11 May 2018 11:57:31 +0300 Subject: mlxsw: spectrum_span: Use a more fitting error code ENOENT is suitable when an item is looked for in a collection and can't be found. The failure here is actually a depletion of a resource, where ENOBUFS is the more fitting error code. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index adf1f789b166..e5f4f7620ab7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -924,7 +924,7 @@ int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms); if (!span_entry) - return -ENOENT; + return -ENOBUFS; netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", span_entry->id); -- cgit v1.2.3 From dbdc8a21759959af2c4a702cacbb94f9b1864e73 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Fri, 11 May 2018 02:53:10 -0700 Subject: bonding: replace the return value type The method ndo_start_xmit is defined as returning a netdev_tx_t, which is a typedef for an enum type, but the implementation in this driver returns an int. Signed-off-by: Tonghao Zhang Signed-off-by: David S. Miller --- drivers/net/bonding/bond_alb.c | 8 ++++---- drivers/net/bonding/bond_main.c | 12 ++++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 1ed9529e7bd1..601f67872e58 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1316,8 +1316,8 @@ void bond_alb_deinitialize(struct bonding *bond) rlb_deinitialize(bond); } -static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, - struct slave *tx_slave) +static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, + struct slave *tx_slave) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct ethhdr *eth_data = eth_hdr(skb); @@ -1351,7 +1351,7 @@ out: return NETDEV_TX_OK; } -int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) +netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct ethhdr *eth_data; @@ -1389,7 +1389,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) return bond_do_alb_xmit(skb, bond, tx_slave); } -int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) +netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct ethhdr *eth_data; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 718e4914e3a0..01bca863e7d8 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3805,7 +3805,8 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond) return slave_id; } -static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev) +static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, + struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct iphdr *iph = ip_hdr(skb); @@ -3841,7 +3842,8 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev /* In active-backup mode, we know that bond->curr_active_slave is always valid if * the bond has a usable interface. 
*/ -static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev) +static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb, + struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; @@ -3979,7 +3981,8 @@ out: * usable slave array is formed in the control path. The xmit function * just calculates hash and sends the packet out. */ -static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb, + struct net_device *dev) { struct bonding *bond = netdev_priv(dev); struct slave *slave; @@ -3999,7 +4002,8 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) } /* in broadcast mode, we send everything to all usable interfaces. */ -static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) +static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, + struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave = NULL; -- cgit v1.2.3 From ae35c6f7a5e78c3bcf1b468a3d352fa0021a22bb Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Fri, 11 May 2018 02:53:11 -0700 Subject: bonding: use the skb_get/set_queue_mapping Use the skb_get_queue_mapping, skb_set_queue_mapping and skb_rx_queue_recorded for skb queue_mapping in bonding driver, but not use it directly. Signed-off-by: Tonghao Zhang Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 01bca863e7d8..966d09183744 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -247,7 +247,7 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, BUILD_BUG_ON(sizeof(skb->queue_mapping) != sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); - skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); if (unlikely(netpoll_tx_running(bond->dev))) bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); @@ -4040,12 +4040,12 @@ static inline int bond_slave_override(struct bonding *bond, struct slave *slave = NULL; struct list_head *iter; - if (!skb->queue_mapping) + if (!skb_rx_queue_recorded(skb)) return 1; /* Find out if any slaves have the same mapping as this skb. */ bond_for_each_slave_rcu(bond, slave, iter) { - if (slave->queue_id == skb->queue_mapping) { + if (slave->queue_id == skb_get_queue_mapping(skb)) { if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { bond_dev_queue_xmit(bond, skb, slave->dev); @@ -4071,7 +4071,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; /* Save the original txq to restore before passing to the driver */ - qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb); if (unlikely(txq >= dev->real_num_tx_queues)) { do { -- cgit v1.2.3 From f4a313b9713933af5c7928dbb6bedc8077ade572 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Fri, 11 May 2018 18:37:34 +0530 Subject: cxgb4: Add new T5 device id Add 0x50ad device id for new T5 card. Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 90b5274a8ba1..adacc6399131 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -187,6 +187,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */ + CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */ /* T6 adapters: */ -- cgit v1.2.3 From 0e249898cac811f833bdb4701b32385c1c272da5 Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Fri, 11 May 2018 18:34:43 +0530 Subject: cxgb4: Fix {vxlan/geneve}_port initialization adapter->rawf_cnt was not initialized, thereby ndo_udp_tunnel_{add/del} was returning immediately without initializing {vxlan/geneve}_port. Also initializes mps_encap_entry refcnt. Fixes: 846eac3fccec ("cxgb4: implement udp tunnel callbacks") Signed-off-by: Arjun Vynipadath Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 21 +++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 2 ++ 2 files changed, 23 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 1efcc85692f7..03767c0bfd5c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4276,6 +4276,20 @@ static int adap_init0(struct adapter *adap) adap->tids.nftids = val[4] - val[3] + 1; adap->sge.ingr_start = val[5]; + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + /* Read the raw mps entries. In T6, the last 2 tcam entries + * are reserved for raw mac addresses (rawf = 2, one per port). + */ + params[0] = FW_PARAM_PFVF(RAWF_START); + params[1] = FW_PARAM_PFVF(RAWF_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, + params, val); + if (ret == 0) { + adap->rawf_start = val[0]; + adap->rawf_cnt = val[1] - val[0] + 1; + } + } + /* qids (ingress/egress) returned from firmware can be anywhere * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. 
* Hence driver needs to allocate memory for this range to @@ -5181,6 +5195,7 @@ static void free_some_resources(struct adapter *adapter) { unsigned int i; + kvfree(adapter->mps_encap); kvfree(adapter->smt); kvfree(adapter->l2t); kvfree(adapter->srq); @@ -5677,6 +5692,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->params.offload = 0; } + adapter->mps_encap = kvzalloc(sizeof(struct mps_encap_entry) * + adapter->params.arch.mps_tcam_size, + GFP_KERNEL); + if (!adapter->mps_encap) + dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n"); + #if IS_ENABLED(CONFIG_IPV6) if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) && (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index e3d4751f21ac..0e007ee72318 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1305,6 +1305,8 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33, FW_PARAMS_PARAM_PFVF_TLS_START = 0x34, FW_PARAMS_PARAM_PFVF_TLS_END = 0x35, + FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36, + FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37, FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39, FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, }; -- cgit v1.2.3 From c50ae55e41ee226b96cf204cd1409bc0057b484e Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Fri, 11 May 2018 18:35:33 +0530 Subject: cxgb4: enable inner header checksum calculation set cntrl bits to indicate whether inner header checksum needs to be calculated whenever the packet is an encapsulated packet and enable supported encap features. Fixes: d0a1299c6bf7 ("cxgb4: add support for vxlan segmentation offload") Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 9 ++- drivers/net/ethernet/chelsio/cxgb4/sge.c | 91 +++++++++++++++++++------ drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 5 ++ 3 files changed, 83 insertions(+), 22 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 03767c0bfd5c..1e31b9dfffee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5626,8 +5626,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC; - if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5) + if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_TSO | NETIF_F_TSO6; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + } if (highdma) netdev->hw_features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 1a28df137e1f..0f87e973a158 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1072,12 +1072,27 @@ static void *inline_tx_skb_header(const struct sk_buff *skb, static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) { int csum_type; - const struct iphdr *iph = ip_hdr(skb); + bool inner_hdr_csum = false; + u16 proto, ver; - if (iph->version == 4) { - if (iph->protocol == IPPROTO_TCP) + if (skb->encapsulation && + (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)) + inner_hdr_csum = true; + + if (inner_hdr_csum) { + ver = inner_ip_hdr(skb)->version; + proto = (ver == 4) ? inner_ip_hdr(skb)->protocol : + inner_ipv6_hdr(skb)->nexthdr; + } else { + ver = ip_hdr(skb)->version; + proto = (ver == 4) ? ip_hdr(skb)->protocol : + ipv6_hdr(skb)->nexthdr; + } + + if (ver == 4) { + if (proto == IPPROTO_TCP) csum_type = TX_CSUM_TCPIP; - else if (iph->protocol == IPPROTO_UDP) + else if (proto == IPPROTO_UDP) csum_type = TX_CSUM_UDPIP; else { nocsum: /* @@ -1090,19 +1105,29 @@ nocsum: /* /* * this doesn't work with extension headers */ - const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; - - if (ip6h->nexthdr == IPPROTO_TCP) + if (proto == IPPROTO_TCP) csum_type = TX_CSUM_TCPIP6; - else if (ip6h->nexthdr == IPPROTO_UDP) + else if (proto == IPPROTO_UDP) csum_type = TX_CSUM_UDPIP6; else goto nocsum; } if (likely(csum_type >= TX_CSUM_TCPIP)) { - u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)); - int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; + int eth_hdr_len, l4_len; + u64 hdr_len; + + if (inner_hdr_csum) { + /* This allows checksum offload for all encapsulated + * packets like GRE etc.. 
+ */ + l4_len = skb_inner_network_header_len(skb); + eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; + } else { + l4_len = skb_network_header_len(skb); + eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; + } + hdr_len = TXPKT_IPHDR_LEN_V(l4_len); if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); @@ -1273,7 +1298,7 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { u32 wr_mid, ctrl0, op; - u64 cntrl, *end; + u64 cntrl, *end, *sgl; int qidx, credits; unsigned int flits, ndesc; struct adapter *adap; @@ -1443,6 +1468,19 @@ out_free: dev_kfree_skb_any(skb); TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN_V(l3hdr_len); } + sgl = (u64 *)(cpl + 1); /* sgl start here */ + if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { + /* If current position is already at the end of the + * txq, reset the current to point to start of the queue + * and update the end ptr as well. + */ + if (sgl == (u64 *)q->q.stat) { + int left = (u8 *)end - (u8 *)q->q.stat; + + end = (void *)q->q.desc + left; + sgl = (void *)q->q.desc; + } + } q->tso++; q->tx_cso += ssi->gso_segs; } else { @@ -1454,6 +1492,7 @@ out_free: dev_kfree_skb_any(skb); wr->op_immdlen = htonl(FW_WR_OP_V(op) | FW_WR_IMMDLEN_V(len)); cpl = (void *)(wr + 1); + sgl = (u64 *)(cpl + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) { cntrl = hwcsum(adap->params.chip, skb) | TXPKT_IPCSUM_DIS_F; @@ -1487,13 +1526,12 @@ out_free: dev_kfree_skb_any(skb); cpl->ctrl1 = cpu_to_be64(cntrl); if (immediate) { - cxgb4_inline_tx_skb(skb, &q->q, cpl + 1); + cxgb4_inline_tx_skb(skb, &q->q, sgl); dev_consume_skb_any(skb); } else { int last_desc; - cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), - end, 0, addr); + cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr); skb_orphan(skb); last_desc = q->q.pidx + ndesc - 1; @@ -2259,7 +2297,7 @@ static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, } static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, - const struct cpl_rx_pkt *pkt) + const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len) { struct adapter *adapter = rxq->rspq.adap; struct sge *s = &adapter->sge; @@ -2275,6 +2313,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, } copy_frags(skb, gl, s->pktshift); + if (tnl_hdr_len) + skb->csum_level = 1; skb->len = gl->tot_len - s->pktshift; skb->data_len = skb->len; skb->truesize += skb->data_len; @@ -2406,7 +2446,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct sge *s = &q->adap->sge; int cpl_trace_pkt = is_t4(q->adap->params.chip) ? 
CPL_TRACE_PKT : CPL_TRACE_PKT_T5; - u16 err_vec; + u16 err_vec, tnl_hdr_len = 0; struct port_info *pi; int ret = 0; @@ -2415,16 +2455,19 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, pkt = (const struct cpl_rx_pkt *)rsp; /* Compressed error vector is enabled for T6 only */ - if (q->adap->params.tp.rx_pkt_encap) + if (q->adap->params.tp.rx_pkt_encap) { err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); - else + tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); + } else { err_vec = be16_to_cpu(pkt->err_vec); + } csum_ok = pkt->csum_calc && !err_vec && (q->netdev->features & NETIF_F_RXCSUM); - if ((pkt->l2info & htonl(RXF_TCP_F)) && + if (((pkt->l2info & htonl(RXF_TCP_F)) || + tnl_hdr_len) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { - do_gro(rxq, si, pkt); + do_gro(rxq, si, pkt, tnl_hdr_len); return 0; } @@ -2471,7 +2514,13 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, } else if (pkt->l2info & htonl(RXF_IP_F)) { __sum16 c = (__force __sum16)pkt->csum; skb->csum = csum_unfold(c); - skb->ip_summed = CHECKSUM_COMPLETE; + + if (tnl_hdr_len) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 1; + } else { + skb->ip_summed = CHECKSUM_COMPLETE; + } rxq->stats.rx_cso++; } } else { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index fe2029e993a2..09e38f0733bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1233,6 +1233,11 @@ struct cpl_rx_pkt { #define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S) #define T6_COMPR_RXERR_SUM_F T6_COMPR_RXERR_SUM_V(1U) +#define T6_RX_TNLHDR_LEN_S 8 +#define T6_RX_TNLHDR_LEN_M 0xFF +#define T6_RX_TNLHDR_LEN_V(x) ((x) << T6_RX_TNLHDR_LEN_S) +#define T6_RX_TNLHDR_LEN_G(x) (((x) >> T6_RX_TNLHDR_LEN_S) & T6_RX_TNLHDR_LEN_M) + struct cpl_trace_pkt { u8 opcode; u8 intf; -- cgit v1.2.3 From 443e2dab32a59b99730b999909718ed83fee4725 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Fri, 11 May 2018 18:36:16 +0530 Subject: cxgb4: avoid schedule while atomic do not sleep while adding or deleting udp tunnel. Fixes: 846eac3fccec ("cxgb4: implement udp tunnel callbacks") Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 1e31b9dfffee..3e4c533c1622 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3081,7 +3081,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev, match_all_mac, match_all_mac, adapter->rawf_start + pi->port_id, - 1, pi->port_id, true); + 1, pi->port_id, false); if (ret < 0) { netdev_info(netdev, "Failed to free mac filter entry, for port %d\n", i); @@ -3169,7 +3169,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev, match_all_mac, adapter->rawf_start + pi->port_id, - 1, pi->port_id, true); + 1, pi->port_id, false); if (ret < 0) { netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n", be16_to_cpu(ti->port)); -- cgit v1.2.3 From b753a9faaf9aef1338c28ebd9ace6d749428788b Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Fri, 11 May 2018 13:08:19 -0500 Subject: net: phy: DP83TC811: Introduce support for the DP83TC811 phy Add support for the DP83811 phy. 
The DP83811 supports both rgmii and sgmii interfaces. There are 2 part numbers for this the DP83TC811R does not reliably support the SGMII interface but the DP83TC811S will. There is not a way to differentiate these parts from the hardware or register set. So this is controlled via the DT to indicate which phy mode is required. Or the part can be strapped to a certain interface. Data sheet can be found here: http://www.ti.com/product/DP83TC811S-Q1/description http://www.ti.com/product/DP83TC811R-Q1/description Signed-off-by: Dan Murphy Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 5 + drivers/net/phy/Makefile | 1 + drivers/net/phy/dp83tc811.c | 347 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 353 insertions(+) create mode 100644 drivers/net/phy/dp83tc811.c (limited to 'drivers/net') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index ae2d69170dc3..19a5778a7ec7 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -291,6 +291,11 @@ config DP83822_PHY ---help--- Supports the DP83822 PHY. +config DP83TC811_PHY + tristate "Texas Instruments DP83TC822 PHY" + ---help--- + Supports the DP83TC822 PHY. + config DP83848_PHY tristate "Texas Instruments DP83848 PHY" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 22183b9d7e08..738246960e3b 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -58,6 +58,7 @@ obj-$(CONFIG_CORTINA_PHY) += cortina.o obj-$(CONFIG_DAVICOM_PHY) += davicom.o obj-$(CONFIG_DP83640_PHY) += dp83640.o obj-$(CONFIG_DP83822_PHY) += dp83822.o +obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o obj-$(CONFIG_DP83848_PHY) += dp83848.o obj-$(CONFIG_DP83867_PHY) += dp83867.o obj-$(CONFIG_FIXED_PHY) += fixed_phy.o diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c new file mode 100644 index 000000000000..081d99aa3985 --- /dev/null +++ b/drivers/net/phy/dp83tc811.c @@ -0,0 +1,347 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the Texas Instruments DP83TC811 PHY + * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DP83TC811_PHY_ID 0x2000a253 +#define DP83811_DEVADDR 0x1f + +#define MII_DP83811_SGMII_CTRL 0x09 +#define MII_DP83811_INT_STAT1 0x12 +#define MII_DP83811_INT_STAT2 0x13 +#define MII_DP83811_RESET_CTRL 0x1f + +#define DP83811_HW_RESET BIT(15) +#define DP83811_SW_RESET BIT(14) + +/* INT_STAT1 bits */ +#define DP83811_RX_ERR_HF_INT_EN BIT(0) +#define DP83811_MS_TRAINING_INT_EN BIT(1) +#define DP83811_ANEG_COMPLETE_INT_EN BIT(2) +#define DP83811_ESD_EVENT_INT_EN BIT(3) +#define DP83811_WOL_INT_EN BIT(4) +#define DP83811_LINK_STAT_INT_EN BIT(5) +#define DP83811_ENERGY_DET_INT_EN BIT(6) +#define DP83811_LINK_QUAL_INT_EN BIT(7) + +/* INT_STAT2 bits */ +#define DP83811_JABBER_DET_INT_EN BIT(0) +#define DP83811_POLARITY_INT_EN BIT(1) +#define DP83811_SLEEP_MODE_INT_EN BIT(2) +#define DP83811_OVERTEMP_INT_EN BIT(3) +#define DP83811_OVERVOLTAGE_INT_EN BIT(6) +#define DP83811_UNDERVOLTAGE_INT_EN BIT(7) + +#define MII_DP83811_RXSOP1 0x04a5 +#define MII_DP83811_RXSOP2 0x04a6 +#define MII_DP83811_RXSOP3 0x04a7 + +/* WoL Registers */ +#define MII_DP83811_WOL_CFG 0x04a0 +#define MII_DP83811_WOL_STAT 0x04a1 +#define MII_DP83811_WOL_DA1 0x04a2 +#define MII_DP83811_WOL_DA2 0x04a3 +#define MII_DP83811_WOL_DA3 0x04a4 + +/* WoL bits */ +#define DP83811_WOL_MAGIC_EN BIT(0) +#define 
DP83811_WOL_SECURE_ON BIT(5) +#define DP83811_WOL_EN BIT(7) +#define DP83811_WOL_INDICATION_SEL BIT(8) +#define DP83811_WOL_CLR_INDICATION BIT(11) + +/* SGMII CTRL bits */ +#define DP83811_TDR_AUTO BIT(8) +#define DP83811_SGMII_EN BIT(12) +#define DP83811_SGMII_AUTO_NEG_EN BIT(13) +#define DP83811_SGMII_TX_ERR_DIS BIT(14) +#define DP83811_SGMII_SOFT_RESET BIT(15) + +static int dp83811_ack_interrupt(struct phy_device *phydev) +{ + int err; + + err = phy_read(phydev, MII_DP83811_INT_STAT1); + if (err < 0) + return err; + + err = phy_read(phydev, MII_DP83811_INT_STAT2); + if (err < 0) + return err; + + return 0; +} + +static int dp83811_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + struct net_device *ndev = phydev->attached_dev; + const u8 *mac; + u16 value; + + if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + mac = (const u8 *)ndev->dev_addr; + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + /* MAC addresses start with byte 5, but stored in mac[0]. + * 811 PHYs store bytes 4|5, 2|3, 0|1 + */ + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_DA1, + (mac[1] << 8) | mac[0]); + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_DA2, + (mac[3] << 8) | mac[2]); + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_DA3, + (mac[5] << 8) | mac[4]); + + value = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_WOL_CFG); + if (wol->wolopts & WAKE_MAGIC) + value |= DP83811_WOL_MAGIC_EN; + else + value &= ~DP83811_WOL_MAGIC_EN; + + if (wol->wolopts & WAKE_MAGICSECURE) { + phy_write_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP1, + (wol->sopass[1] << 8) | wol->sopass[0]); + phy_write_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP2, + (wol->sopass[3] << 8) | wol->sopass[2]); + phy_write_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP3, + (wol->sopass[5] << 8) | wol->sopass[4]); + value |= DP83811_WOL_SECURE_ON; + } else { + value &= ~DP83811_WOL_SECURE_ON; + } + + value |= (DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL | + DP83811_WOL_CLR_INDICATION); + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, + value); + } else { + value = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_WOL_CFG); + value &= ~DP83811_WOL_EN; + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, + value); + } + + return 0; +} + +static void dp83811_get_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + u16 sopass_val; + int value; + + wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE); + wol->wolopts = 0; + + value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG); + + if (value & DP83811_WOL_MAGIC_EN) + wol->wolopts |= WAKE_MAGIC; + + if (value & DP83811_WOL_SECURE_ON) { + sopass_val = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP1); + wol->sopass[0] = (sopass_val & 0xff); + wol->sopass[1] = (sopass_val >> 8); + + sopass_val = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP2); + wol->sopass[2] = (sopass_val & 0xff); + wol->sopass[3] = (sopass_val >> 8); + + sopass_val = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP3); + wol->sopass[4] = (sopass_val & 0xff); + wol->sopass[5] = (sopass_val >> 8); + + wol->wolopts |= WAKE_MAGICSECURE; + } + + /* WoL is not enabled so set wolopts to 0 */ + if (!(value & DP83811_WOL_EN)) + wol->wolopts = 0; +} + +static int dp83811_config_intr(struct phy_device *phydev) +{ + int misr_status, err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + misr_status = phy_read(phydev, MII_DP83811_INT_STAT1); + if (misr_status < 0) + return misr_status; + + 
misr_status |= (DP83811_RX_ERR_HF_INT_EN | + DP83811_MS_TRAINING_INT_EN | + DP83811_ANEG_COMPLETE_INT_EN | + DP83811_ESD_EVENT_INT_EN | + DP83811_WOL_INT_EN | + DP83811_LINK_STAT_INT_EN | + DP83811_ENERGY_DET_INT_EN | + DP83811_LINK_QUAL_INT_EN); + + err = phy_write(phydev, MII_DP83811_INT_STAT1, misr_status); + if (err < 0) + return err; + + misr_status = phy_read(phydev, MII_DP83811_INT_STAT2); + if (misr_status < 0) + return misr_status; + + misr_status |= (DP83811_JABBER_DET_INT_EN | + DP83811_POLARITY_INT_EN | + DP83811_SLEEP_MODE_INT_EN | + DP83811_OVERTEMP_INT_EN | + DP83811_OVERVOLTAGE_INT_EN | + DP83811_UNDERVOLTAGE_INT_EN); + + err = phy_write(phydev, MII_DP83811_INT_STAT2, misr_status); + + } else { + err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); + if (err < 0) + return err; + + err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); + } + + return err; +} + +static int dp83811_config_aneg(struct phy_device *phydev) +{ + int value, err; + + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + value = phy_read(phydev, MII_DP83811_SGMII_CTRL); + if (phydev->autoneg == AUTONEG_ENABLE) { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (DP83811_SGMII_AUTO_NEG_EN | value)); + if (err < 0) + return err; + } else { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (~DP83811_SGMII_AUTO_NEG_EN & value)); + if (err < 0) + return err; + } + } + + return genphy_config_aneg(phydev); +} + +static int dp83811_config_init(struct phy_device *phydev) +{ + int value, err; + + err = genphy_config_init(phydev); + if (err < 0) + return err; + + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + value = phy_read(phydev, MII_DP83811_SGMII_CTRL); + if (!(value & DP83811_SGMII_EN)) { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (DP83811_SGMII_EN | value)); + if (err < 0) + return err; + } else { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (~DP83811_SGMII_EN & value)); + if (err < 0) + return err; + } + } + + value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN; + + return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, + value); +} + +static int dp83811_phy_reset(struct phy_device *phydev) +{ + int err; + + err = phy_write(phydev, MII_DP83811_RESET_CTRL, DP83811_HW_RESET); + if (err < 0) + return err; + + return 0; +} + +static int dp83811_suspend(struct phy_device *phydev) +{ + int value; + + value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG); + + if (!(value & DP83811_WOL_EN)) + genphy_suspend(phydev); + + return 0; +} + +static int dp83811_resume(struct phy_device *phydev) +{ + int value; + + genphy_resume(phydev); + + value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG); + + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, value | + DP83811_WOL_CLR_INDICATION); + + return 0; +} + +static struct phy_driver dp83811_driver[] = { + { + .phy_id = DP83TC811_PHY_ID, + .phy_id_mask = 0xfffffff0, + .name = "TI DP83TC811", + .features = PHY_BASIC_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = dp83811_config_init, + .config_aneg = dp83811_config_aneg, + .soft_reset = dp83811_phy_reset, + .get_wol = dp83811_get_wol, + .set_wol = dp83811_set_wol, + .ack_interrupt = dp83811_ack_interrupt, + .config_intr = dp83811_config_intr, + .suspend = dp83811_suspend, + .resume = dp83811_resume, + }, +}; +module_phy_driver(dp83811_driver); + +static struct mdio_device_id __maybe_unused dp83811_tbl[] = { + { DP83TC811_PHY_ID, 0xfffffff0 }, + { }, +}; +MODULE_DEVICE_TABLE(mdio, dp83811_tbl); + +MODULE_DESCRIPTION("Texas 
Instruments DP83TC811 PHY driver"); +MODULE_AUTHOR("Dan Murphy Date: Thu, 10 May 2018 14:57:35 +0200 Subject: wireless-drivers: Dynamically allocate struct station_info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since the addition of the TXQ stats to cfg80211, the station_info struct has grown to be quite large, which results in warnings when allocated on the stack. Fix the affected places to do dynamic allocations instead. Fixes: 52539ca89f36 ("cfg80211: Expose TXQ stats and parameters to userspace") Reviewed-by: Sergey Matyukevich Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/main.c | 14 +++++--- drivers/net/wireless/ath/wil6210/debugfs.c | 22 +++++++----- drivers/net/wireless/ath/wil6210/wmi.c | 19 ++++++---- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 18 ++++++---- drivers/net/wireless/marvell/mwifiex/uap_event.c | 25 ++++++++----- drivers/net/wireless/quantenna/qtnfmac/event.c | 41 ++++++++++++++-------- 6 files changed, 91 insertions(+), 48 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index db95f85751e3..808fb30be9ad 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -426,7 +426,7 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, { u8 *ies = NULL, *wpa_ie = NULL, *pos; size_t ies_len = 0; - struct station_info sinfo; + struct station_info *sinfo; ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid); @@ -482,16 +482,20 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, keymgmt, ucipher, auth, apsd_info); /* send event to application */ - memset(&sinfo, 0, sizeof(sinfo)); + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return; /* TODO: sinfo.generation */ - sinfo.assoc_req_ies = ies; - sinfo.assoc_req_ies_len = ies_len; + sinfo->assoc_req_ies = ies; + sinfo->assoc_req_ies_len = ies_len; - cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL); + cfg80211_new_sta(vif->ndev, mac_addr, sinfo, GFP_KERNEL); netif_wake_queue(vif->ndev); + + kfree(sinfo); } void disconnect_timer_handler(struct timer_list *t) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 8c90b3111f0b..11e46e44381e 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1200,8 +1200,12 @@ static const struct file_operations fops_freq = { static int wil_link_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; - struct station_info sinfo; - int i, rc; + struct station_info *sinfo; + int i, rc = 0; + + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { struct wil_sta_info *p = &wil->sta[i]; @@ -1229,19 +1233,21 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data) vif = (mid < wil->max_vifs) ? 
wil->vifs[mid] : NULL; if (vif) { - rc = wil_cid_fill_sinfo(vif, i, &sinfo); + rc = wil_cid_fill_sinfo(vif, i, sinfo); if (rc) - return rc; + goto out; - seq_printf(s, " Tx_mcs = %d\n", sinfo.txrate.mcs); - seq_printf(s, " Rx_mcs = %d\n", sinfo.rxrate.mcs); - seq_printf(s, " SQ = %d\n", sinfo.signal); + seq_printf(s, " Tx_mcs = %d\n", sinfo->txrate.mcs); + seq_printf(s, " Rx_mcs = %d\n", sinfo->rxrate.mcs); + seq_printf(s, " SQ = %d\n", sinfo->signal); } else { seq_puts(s, " INVALID MID\n"); } } - return 0; +out: + kfree(sinfo); + return rc; } static int wil_link_seq_open(struct inode *inode, struct file *file) diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index a3dda9a97c1f..90de9a916241 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -824,7 +824,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) struct wireless_dev *wdev = vif_to_wdev(vif); struct wmi_connect_event *evt = d; int ch; /* channel number */ - struct station_info sinfo; + struct station_info *sinfo; u8 *assoc_req_ie, *assoc_resp_ie; size_t assoc_req_ielen, assoc_resp_ielen; /* capinfo(u16) + listen_interval(u16) + IEs */ @@ -940,6 +940,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) vif->bss = NULL; } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { + if (rc) { if (disable_ap_sme) /* notify new_sta has failed */ @@ -947,16 +948,22 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) goto out; } - memset(&sinfo, 0, sizeof(sinfo)); + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) { + rc = -ENOMEM; + goto out; + } - sinfo.generation = wil->sinfo_gen++; + sinfo->generation = wil->sinfo_gen++; if (assoc_req_ie) { - sinfo.assoc_req_ies = assoc_req_ie; - sinfo.assoc_req_ies_len = assoc_req_ielen; + sinfo->assoc_req_ies = assoc_req_ie; + sinfo->assoc_req_ies_len = assoc_req_ielen; } - cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); + cfg80211_new_sta(ndev, evt->bssid, sinfo, GFP_KERNEL); + + kfree(sinfo); } else { wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype, evt->cid); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index ff6b3514c501..79a124d1aa23 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5498,7 +5498,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, static int generation; u32 event = e->event_code; u32 reason = e->reason; - struct station_info sinfo; + struct station_info *sinfo; brcmf_dbg(CONN, "event %s (%u), reason %d\n", brcmf_fweh_event_name(event), event, reason); @@ -5511,16 +5511,22 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) && (reason == BRCMF_E_STATUS_SUCCESS)) { - memset(&sinfo, 0, sizeof(sinfo)); if (!data) { brcmf_err("No IEs present in ASSOC/REASSOC_IND"); return -EINVAL; } - sinfo.assoc_req_ies = data; - sinfo.assoc_req_ies_len = e->datalen; + + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + + sinfo->assoc_req_ies = data; + sinfo->assoc_req_ies_len = e->datalen; generation++; - sinfo.generation = generation; - cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_KERNEL); + sinfo->generation = generation; + cfg80211_new_sta(ndev, 
e->addr, sinfo, GFP_KERNEL); + + kfree(sinfo); } else if ((event == BRCMF_E_DISASSOC_IND) || (event == BRCMF_E_DEAUTH_IND) || (event == BRCMF_E_DEAUTH)) { diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c index e8c8728db15a..e86217a6b9ca 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_event.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c @@ -108,7 +108,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) struct mwifiex_adapter *adapter = priv->adapter; int len, i; u32 eventcause = adapter->event_cause; - struct station_info sinfo; + struct station_info *sinfo; struct mwifiex_assoc_event *event; struct mwifiex_sta_node *node; u8 *deauth_mac; @@ -117,7 +117,10 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) switch (eventcause) { case EVENT_UAP_STA_ASSOC: - memset(&sinfo, 0, sizeof(sinfo)); + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + event = (struct mwifiex_assoc_event *) (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER); if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { @@ -132,28 +135,31 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) len = ETH_ALEN; if (len != -1) { - sinfo.assoc_req_ies = &event->data[len]; - len = (u8 *)sinfo.assoc_req_ies - + sinfo->assoc_req_ies = &event->data[len]; + len = (u8 *)sinfo->assoc_req_ies - (u8 *)&event->frame_control; - sinfo.assoc_req_ies_len = + sinfo->assoc_req_ies_len = le16_to_cpu(event->len) - (u16)len; } } - cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo, + cfg80211_new_sta(priv->netdev, event->sta_addr, sinfo, GFP_KERNEL); node = mwifiex_add_sta_entry(priv, event->sta_addr); if (!node) { mwifiex_dbg(adapter, ERROR, "could not create station entry!\n"); + kfree(sinfo); return -1; } - if (!priv->ap_11n_enabled) + if (!priv->ap_11n_enabled) { + kfree(sinfo); break; + } - mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies, - sinfo.assoc_req_ies_len, node); + mwifiex_set_sta_ht_cap(priv, sinfo->assoc_req_ies, + sinfo->assoc_req_ies_len, node); for (i = 0; i < MAX_NUM_TID; i++) { if (node->is_11n_enabled) @@ -163,6 +169,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED; } memset(node->rx_seq, 0xff, sizeof(node->rx_seq)); + kfree(sinfo); break; case EVENT_UAP_STA_DEAUTH: deauth_mac = adapter->event_body + diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index cb2a6c12f870..16617c44f81b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -34,12 +34,13 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, { const u8 *sta_addr; u16 frame_control; - struct station_info sinfo = { 0 }; + struct station_info *sinfo; size_t payload_len; u16 tlv_type; u16 tlv_value_len; size_t tlv_full_len; const struct qlink_tlv_hdr *tlv; + int ret = 0; if (unlikely(len < sizeof(*sta_assoc))) { pr_err("VIF%u.%u: payload is too short (%u < %zu)\n", @@ -53,6 +54,10 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, return -EPROTO; } + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + sta_addr = sta_assoc->sta_addr; frame_control = le16_to_cpu(sta_assoc->frame_control); @@ -61,9 +66,9 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, qtnf_sta_list_add(vif, sta_addr); - sinfo.assoc_req_ies = NULL; - sinfo.assoc_req_ies_len = 0; - 
sinfo.generation = vif->generation; + sinfo->assoc_req_ies = NULL; + sinfo->assoc_req_ies_len = 0; + sinfo->generation = vif->generation; payload_len = len - sizeof(*sta_assoc); tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies; @@ -73,23 +78,27 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, tlv_value_len = le16_to_cpu(tlv->len); tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr); - if (tlv_full_len > payload_len) - return -EINVAL; + if (tlv_full_len > payload_len) { + ret = -EINVAL; + goto out; + } if (tlv_type == QTN_TLV_ID_IE_SET) { const struct qlink_tlv_ie_set *ie_set; unsigned int ie_len; - if (payload_len < sizeof(*ie_set)) - return -EINVAL; + if (payload_len < sizeof(*ie_set)) { + ret = -EINVAL; + goto out; + } ie_set = (const struct qlink_tlv_ie_set *)tlv; ie_len = tlv_value_len - (sizeof(*ie_set) - sizeof(ie_set->hdr)); if (ie_set->type == QLINK_IE_SET_ASSOC_REQ && ie_len) { - sinfo.assoc_req_ies = ie_set->ie_data; - sinfo.assoc_req_ies_len = ie_len; + sinfo->assoc_req_ies = ie_set->ie_data; + sinfo->assoc_req_ies_len = ie_len; } } @@ -97,13 +106,17 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); } - if (payload_len) - return -EINVAL; + if (payload_len) { + ret = -EINVAL; + goto out; + } - cfg80211_new_sta(vif->netdev, sta_assoc->sta_addr, &sinfo, + cfg80211_new_sta(vif->netdev, sta_assoc->sta_addr, sinfo, GFP_KERNEL); - return 0; +out: + kfree(sinfo); + return ret; } static int -- cgit v1.2.3 From 6823dc0d91e5d238ca14a252228a5121d41eb517 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 10 May 2018 16:06:09 +0200 Subject: mt76x2: add a polling delay in mt76x2_mac_stop routine Add a usleep_range in mt76x2_mac_stop routine in order to add a polling delay checking values of MT_MAC_STATUS and IBI_R12 registers Signed-off-by: Lorenzo Bianconi Acked-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index d21e4a7c1bb9..dd4c1127797e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -370,12 +370,12 @@ void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force) /* Wait for MAC to become idle */ for (i = 0; i < 300; i++) { - if (mt76_rr(dev, MT_MAC_STATUS) & - (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) - continue; - - if (mt76_rr(dev, MT_BBP(IBI, 12))) + if ((mt76_rr(dev, MT_MAC_STATUS) & + (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) || + mt76_rr(dev, MT_BBP(IBI, 12))) { + usleep_range(10, 20); continue; + } stopped = true; break; -- cgit v1.2.3 From b41c393676696c20456d5fd8cbe71453560ee06e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 10 May 2018 15:32:17 +0100 Subject: rsi: fix spelling mistake: "thead" -> "thread" Trivial fix to spelling mistake in rsi_dbg debug message text Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 1f1b97220d43..3644d7d99463 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -485,7 +485,7 @@ void rsi_core_xmit(struct rsi_common *common, 
struct sk_buff *skb) } rsi_core_queue_pkt(common, skb); - rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thead <===\n", __func__); + rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__); rsi_set_event(&common->tx_thread.event); return; -- cgit v1.2.3 From cfb353c0dc058bc1619cc226d3cbbda1f360bdd3 Mon Sep 17 00:00:00 2001 From: Yu Wang Date: Thu, 3 May 2018 14:01:25 +0800 Subject: ath10k: add quiet mode support for QCA6174/QCA9377 To enable thermal throttling for QCA6174 and QCA9377, implement gen_pdev_set_quiet_mode for them. With this change, quiet period for QCA6174/QCA9377 can be also set/get via debugfs. Usage: To set quiet period: echo > /sys/kernel/debug/ieee80211/phyX/ath10k/quiet_period To get the current quiet period: cat /sys/kernel/debug/ieee80211/phyX/ath10k/quiet_period This change has been verified with QCA6174 hw3.2(fw: WLAN_RM.4.4.1-00102-QCARMSWP-1). Signed-off-by: Yu Wang Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 33 ++++++++++++++++++++++++++++++- drivers/net/wireless/ath/ath10k/wmi-tlv.h | 16 +++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 01f4eb201330..2e34a1fc5ba6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3135,6 +3135,37 @@ ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar, return skb; } +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period, + u32 duration, u32 next_offset, + u32 enabled) +{ + struct wmi_tlv_set_quiet_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + /* vdev_id is not in use, set to 0 */ + cmd->vdev_id = __cpu_to_le32(0); + cmd->period = __cpu_to_le32(period); + cmd->duration = __cpu_to_le32(duration); + cmd->next_start = __cpu_to_le32(next_offset); + cmd->enabled = __cpu_to_le32(enabled); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv quiet param: period %u duration %u enabled %d\n", + period, duration, enabled); + return skb; +} + static struct sk_buff * ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar) { @@ -3854,7 +3885,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, - /* .gen_pdev_set_quiet_mode not implemented */ + .gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode, .gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature, /* .gen_addba_clear_resp not implemented */ /* .gen_addba_send not implemented */ diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index 954c50bb3f66..3e1e340cd834 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -1952,6 +1953,21 @@ struct wmi_tlv_wow_add_del_event_cmd { __le32 event_bitmap; } __packed; +/* Command to set/unset chip in quiet mode */ +struct wmi_tlv_set_quiet_cmd { + __le32 vdev_id; + + /* in TUs */ + __le32 period; + + /* in TUs */ + __le32 duration; + + /* offset in TUs */ + __le32 next_start; + __le32 enabled; +} __packed; + struct wmi_tlv_wow_enable_cmd { __le32 enable; } __packed; -- cgit v1.2.3 From 469bd5eab6224c966253e62c50af1084a7cdad64 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 5 May 2018 00:04:08 -0300 Subject: ath10k: snoc: Remove owner assignment from platform_driver Structure platform_driver does not need to set the owner field, as this will be populated by the driver core. Generated by scripts/coccinelle/api/platform_no_drv_owner.cocci. Signed-off-by: Fabio Estevam Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 47a4d2a5bd4c..a3a7042fe13a 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1385,7 +1385,6 @@ static struct platform_driver ath10k_snoc_driver = { .remove = ath10k_snoc_remove, .driver = { .name = "ath10k_snoc", - .owner = THIS_MODULE, .of_match_table = ath10k_snoc_dt_match, }, }; -- cgit v1.2.3 From e3148cc5fecf60dcbd07e5c9cae987976d25cb17 Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Sun, 6 May 2018 15:25:00 +0200 Subject: ath10k: fix return value check in wake_tx_q op ath10k_mac_tx_push_txq returns either a postive integer (length) on success or a negative error code on error. The "if (ret) break;" statement will thus always break out of the loop immediately after ath10k_mac_tx_push_txq has returned (making the loop pointless). A side effect of this fix is that we will iterate the queue until ath10k_mac_tx_push_txq returns -ENOENT. This will make sure the queue is not added back to ar->txqs when it is empty. This could potentially improve performance somewhat (I have seen a small improvement with SDIO devices). Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 3d7119ad7c7a..487a7a7380fd 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4290,7 +4290,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { ret = ath10k_mac_tx_push_txq(hw, f_txq); - if (ret) + if (ret < 0) break; } if (ret != -ENOENT) -- cgit v1.2.3 From 3e2740cda22473cca2abd117baeabf20d9024fba Mon Sep 17 00:00:00 2001 From: Kenneth Lu Date: Mon, 7 May 2018 17:39:48 +0800 Subject: ath10k: remove variables which set but not used Variable buf_len and num_vdev_stats are being assigned but never read. These are redundant and can be remove. 
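This is the pattern gcc typically reports with -Wunused-but-set-variable. As a rough, hypothetical illustration (kernel-style C, not code from the driver; assumes the usual linux/types.h and byte-order helpers):

    /* hypothetical helper, for illustration only */
    static u32 example_pull_stats(const __le32 *ev)
    {
            u32 num_pdev_stats, num_vdev_stats;

            num_pdev_stats = __le32_to_cpu(ev[0]);
            num_vdev_stats = __le32_to_cpu(ev[1]); /* assigned, never read */

            return num_pdev_stats; /* only this value is used */
    }

Deleting num_vdev_stats and its assignment changes no behaviour, which is exactly what the diff below does for buf_len and num_vdev_stats.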
Signed-off-by: Kenneth Lu Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index bb8e9e259a56..46fb96ee5852 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2318,7 +2318,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) u32 phy_mode; u32 snr; u32 rate; - u32 buf_len; u16 fc; int ret; @@ -2330,7 +2329,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) } channel = __le32_to_cpu(arg.channel); - buf_len = __le32_to_cpu(arg.buf_len); rx_status = __le32_to_cpu(arg.status); snr = __le32_to_cpu(arg.snr); phy_mode = __le32_to_cpu(arg.phy_mode); @@ -2740,14 +2738,13 @@ static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats *stats) { const struct wmi_stats_event *ev = (void *)skb->data; - u32 num_pdev_stats, num_vdev_stats, num_peer_stats; + u32 num_pdev_stats, num_peer_stats; int i; if (!skb_pull(skb, sizeof(*ev))) return -EPROTO; num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); for (i = 0; i < num_pdev_stats; i++) { @@ -2795,14 +2792,13 @@ static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats *stats) { const struct wmi_stats_event *ev = (void *)skb->data; - u32 num_pdev_stats, num_vdev_stats, num_peer_stats; + u32 num_pdev_stats, num_peer_stats; int i; if (!skb_pull(skb, sizeof(*ev))) return -EPROTO; num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); for (i = 0; i < num_pdev_stats; i++) { @@ -2856,7 +2852,6 @@ static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar, const struct wmi_10_2_stats_event *ev = (void *)skb->data; u32 num_pdev_stats; u32 num_pdev_ext_stats; - u32 num_vdev_stats; u32 num_peer_stats; int i; @@ -2865,7 +2860,6 @@ static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar, num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); for (i = 0; i < num_pdev_stats; i++) { @@ -2935,7 +2929,6 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, const struct wmi_10_2_stats_event *ev = (void *)skb->data; u32 num_pdev_stats; u32 num_pdev_ext_stats; - u32 num_vdev_stats; u32 num_peer_stats; int i; @@ -2944,7 +2937,6 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); for (i = 0; i < num_pdev_stats; i++) { -- cgit v1.2.3 From 0be928850dac4dc3f9f4e0a9966b6c809557a494 Mon Sep 17 00:00:00 2001 From: Marcus Folkesson Date: Wed, 9 May 2018 09:56:33 +0200 Subject: ath10k: hw: make consistent usage of ATH10K_FW_DIR in paths For some reason not all entries used ATH10K_FW_DIR so fix that. No functional changes. 
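There is no functional change because C concatenates adjacent string literals: assuming ATH10K_FW_DIR is defined as "ath10k" (as in core.h), ATH10K_FW_DIR "/QCA6174/hw2.1" expands to the same "ath10k/QCA6174/hw2.1" string the old entries hard-coded. A minimal standalone sketch:

    #include <stdio.h>

    #define ATH10K_FW_DIR "ath10k"  /* assumed definition, see core.h */
    #define QCA6174_HW_2_1_FW_DIR ATH10K_FW_DIR "/QCA6174/hw2.1"

    int main(void)
    {
            /* prints ath10k/QCA6174/hw2.1, identical to the old literal */
            printf("%s\n", QCA6174_HW_2_1_FW_DIR);
            return 0;
    }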
Signed-off-by: Marcus Folkesson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/hw.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index b8bdabe73073..23467e9fefeb 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -85,11 +85,11 @@ enum qca9377_chip_id_rev { QCA9377_HW_1_1_CHIP_ID_REV = 0x1, }; -#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" +#define QCA6174_HW_2_1_FW_DIR ATH10K_FW_DIR "/QCA6174/hw2.1" #define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin" #define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234 -#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0" +#define QCA6174_HW_3_0_FW_DIR ATH10K_FW_DIR "/QCA6174/hw3.0" #define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin" #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234 -- cgit v1.2.3 From c3f7f31efe39209722a6fd19d7b7a4cceb7ca75c Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Wed, 9 May 2018 15:03:21 +0530 Subject: ath10k: replace bit shifts with the BIT() macro for rx desc bits Use the BIT() macro from 'linux/bitops.h' to define the rx desc bit flags to have consistency with new definitions. Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/rx_desc.h | 136 +++++++++++++++--------------- 1 file changed, 69 insertions(+), 67 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index 545deb6d7af1..ea4075d456fa 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -18,39 +18,41 @@ #ifndef _RX_DESC_H_ #define _RX_DESC_H_ +#include + enum rx_attention_flags { - RX_ATTENTION_FLAGS_FIRST_MPDU = 1 << 0, - RX_ATTENTION_FLAGS_LAST_MPDU = 1 << 1, - RX_ATTENTION_FLAGS_MCAST_BCAST = 1 << 2, - RX_ATTENTION_FLAGS_PEER_IDX_INVALID = 1 << 3, - RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = 1 << 4, - RX_ATTENTION_FLAGS_POWER_MGMT = 1 << 5, - RX_ATTENTION_FLAGS_NON_QOS = 1 << 6, - RX_ATTENTION_FLAGS_NULL_DATA = 1 << 7, - RX_ATTENTION_FLAGS_MGMT_TYPE = 1 << 8, - RX_ATTENTION_FLAGS_CTRL_TYPE = 1 << 9, - RX_ATTENTION_FLAGS_MORE_DATA = 1 << 10, - RX_ATTENTION_FLAGS_EOSP = 1 << 11, - RX_ATTENTION_FLAGS_U_APSD_TRIGGER = 1 << 12, - RX_ATTENTION_FLAGS_FRAGMENT = 1 << 13, - RX_ATTENTION_FLAGS_ORDER = 1 << 14, - RX_ATTENTION_FLAGS_CLASSIFICATION = 1 << 15, - RX_ATTENTION_FLAGS_OVERFLOW_ERR = 1 << 16, - RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = 1 << 17, - RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18, - RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = 1 << 19, - RX_ATTENTION_FLAGS_SA_IDX_INVALID = 1 << 20, - RX_ATTENTION_FLAGS_DA_IDX_INVALID = 1 << 21, - RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = 1 << 22, - RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = 1 << 23, - RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = 1 << 24, - RX_ATTENTION_FLAGS_DIRECTED = 1 << 25, - RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = 1 << 26, - RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = 1 << 27, - RX_ATTENTION_FLAGS_TKIP_MIC_ERR = 1 << 28, - RX_ATTENTION_FLAGS_DECRYPT_ERR = 1 << 29, - RX_ATTENTION_FLAGS_FCS_ERR = 1 << 30, - RX_ATTENTION_FLAGS_MSDU_DONE = 1 << 31, + RX_ATTENTION_FLAGS_FIRST_MPDU = BIT(0), + RX_ATTENTION_FLAGS_LAST_MPDU = BIT(1), + RX_ATTENTION_FLAGS_MCAST_BCAST = BIT(2), + RX_ATTENTION_FLAGS_PEER_IDX_INVALID = BIT(3), + RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = BIT(4), + RX_ATTENTION_FLAGS_POWER_MGMT = BIT(5), + RX_ATTENTION_FLAGS_NON_QOS = BIT(6), + RX_ATTENTION_FLAGS_NULL_DATA = BIT(7), + 
RX_ATTENTION_FLAGS_MGMT_TYPE = BIT(8), + RX_ATTENTION_FLAGS_CTRL_TYPE = BIT(9), + RX_ATTENTION_FLAGS_MORE_DATA = BIT(10), + RX_ATTENTION_FLAGS_EOSP = BIT(11), + RX_ATTENTION_FLAGS_U_APSD_TRIGGER = BIT(12), + RX_ATTENTION_FLAGS_FRAGMENT = BIT(13), + RX_ATTENTION_FLAGS_ORDER = BIT(14), + RX_ATTENTION_FLAGS_CLASSIFICATION = BIT(15), + RX_ATTENTION_FLAGS_OVERFLOW_ERR = BIT(16), + RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = BIT(17), + RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = BIT(18), + RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = BIT(19), + RX_ATTENTION_FLAGS_SA_IDX_INVALID = BIT(20), + RX_ATTENTION_FLAGS_DA_IDX_INVALID = BIT(21), + RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = BIT(22), + RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = BIT(23), + RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = BIT(24), + RX_ATTENTION_FLAGS_DIRECTED = BIT(25), + RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = BIT(26), + RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = BIT(27), + RX_ATTENTION_FLAGS_TKIP_MIC_ERR = BIT(28), + RX_ATTENTION_FLAGS_DECRYPT_ERR = BIT(29), + RX_ATTENTION_FLAGS_FCS_ERR = BIT(30), + RX_ATTENTION_FLAGS_MSDU_DONE = BIT(31), }; struct rx_attention { @@ -254,15 +256,15 @@ enum htt_rx_mpdu_encrypt_type { #define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16 #define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000 #define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28 -#define RX_MPDU_START_INFO0_FROM_DS (1 << 11) -#define RX_MPDU_START_INFO0_TO_DS (1 << 12) -#define RX_MPDU_START_INFO0_ENCRYPTED (1 << 13) -#define RX_MPDU_START_INFO0_RETRY (1 << 14) -#define RX_MPDU_START_INFO0_TXBF_H_INFO (1 << 15) +#define RX_MPDU_START_INFO0_FROM_DS BIT(11) +#define RX_MPDU_START_INFO0_TO_DS BIT(12) +#define RX_MPDU_START_INFO0_ENCRYPTED BIT(13) +#define RX_MPDU_START_INFO0_RETRY BIT(14) +#define RX_MPDU_START_INFO0_TXBF_H_INFO BIT(15) #define RX_MPDU_START_INFO1_TID_MASK 0xf0000000 #define RX_MPDU_START_INFO1_TID_LSB 28 -#define RX_MPDU_START_INFO1_DIRECTED (1 << 16) +#define RX_MPDU_START_INFO1_DIRECTED BIT(16) struct rx_mpdu_start { __le32 info0; @@ -357,13 +359,13 @@ struct rx_mpdu_start { #define RX_MPDU_END_INFO0_RESERVED_0_LSB 0 #define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000 #define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16 -#define RX_MPDU_END_INFO0_OVERFLOW_ERR (1 << 13) -#define RX_MPDU_END_INFO0_LAST_MPDU (1 << 14) -#define RX_MPDU_END_INFO0_POST_DELIM_ERR (1 << 15) -#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR (1 << 28) -#define RX_MPDU_END_INFO0_TKIP_MIC_ERR (1 << 29) -#define RX_MPDU_END_INFO0_DECRYPT_ERR (1 << 30) -#define RX_MPDU_END_INFO0_FCS_ERR (1 << 31) +#define RX_MPDU_END_INFO0_OVERFLOW_ERR BIT(13) +#define RX_MPDU_END_INFO0_LAST_MPDU BIT(14) +#define RX_MPDU_END_INFO0_POST_DELIM_ERR BIT(15) +#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR BIT(28) +#define RX_MPDU_END_INFO0_TKIP_MIC_ERR BIT(29) +#define RX_MPDU_END_INFO0_DECRYPT_ERR BIT(30) +#define RX_MPDU_END_INFO0_FCS_ERR BIT(31) struct rx_mpdu_end { __le32 info0; @@ -422,12 +424,12 @@ struct rx_mpdu_end { #define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8 #define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000 #define RX_MSDU_START_INFO1_SA_IDX_LSB 16 -#define RX_MSDU_START_INFO1_IPV4_PROTO (1 << 10) -#define RX_MSDU_START_INFO1_IPV6_PROTO (1 << 11) -#define RX_MSDU_START_INFO1_TCP_PROTO (1 << 12) -#define RX_MSDU_START_INFO1_UDP_PROTO (1 << 13) -#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14) -#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15) +#define RX_MSDU_START_INFO1_IPV4_PROTO BIT(10) +#define RX_MSDU_START_INFO1_IPV6_PROTO BIT(11) +#define RX_MSDU_START_INFO1_TCP_PROTO BIT(12) +#define RX_MSDU_START_INFO1_UDP_PROTO BIT(13) 
+#define RX_MSDU_START_INFO1_IP_FRAG BIT(14) +#define RX_MSDU_START_INFO1_TCP_ONLY_ACK BIT(15) #define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff #define RX_MSDU_START_INFO2_DA_IDX_LSB 0 @@ -568,10 +570,10 @@ struct rx_msdu_start { #define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff #define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0 -#define RX_MSDU_END_INFO0_FIRST_MSDU (1 << 14) -#define RX_MSDU_END_INFO0_LAST_MSDU (1 << 15) -#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30) -#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31) +#define RX_MSDU_END_INFO0_FIRST_MSDU BIT(14) +#define RX_MSDU_END_INFO0_LAST_MSDU BIT(15) +#define RX_MSDU_END_INFO0_PRE_DELIM_ERR BIT(30) +#define RX_MSDU_END_INFO0_RESERVED_3B BIT(31) struct rx_msdu_end_common { __le16 ip_hdr_cksum; @@ -691,7 +693,7 @@ struct rx_msdu_end { #define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C #define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D -#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0) +#define RX_PPDU_START_INFO0_IS_GREENFIELD BIT(0) #define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f #define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0 @@ -701,15 +703,15 @@ struct rx_msdu_end { #define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18 #define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000 #define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24 -#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT (1 << 4) -#define RX_PPDU_START_INFO1_L_SIG_PARITY (1 << 17) +#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT BIT(4) +#define RX_PPDU_START_INFO1_L_SIG_PARITY BIT(17) #define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff #define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0 #define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff #define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0 -#define RX_PPDU_START_INFO3_TXBF_H_INFO (1 << 24) +#define RX_PPDU_START_INFO3_TXBF_H_INFO BIT(24) #define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff #define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0 @@ -898,14 +900,14 @@ struct rx_ppdu_start { * Reserved: HW should fill with 0, FW should ignore. */ -#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0) -#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1) -#define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2) +#define RX_PPDU_END_FLAGS_PHY_ERR BIT(0) +#define RX_PPDU_END_FLAGS_RX_LOCATION BIT(1) +#define RX_PPDU_END_FLAGS_TXBF_H_INFO BIT(2) #define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff #define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0 -#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24) -#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25) +#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK BIT(24) +#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL BIT(25) #define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc #define RX_PPDU_END_INFO1_PEER_IDX_LSB 2 @@ -1265,9 +1267,9 @@ struct rx_ppdu_end { * to 0. */ -#define FW_RX_DESC_INFO0_DISCARD (1 << 0) -#define FW_RX_DESC_INFO0_FORWARD (1 << 1) -#define FW_RX_DESC_INFO0_INSPECT (1 << 5) +#define FW_RX_DESC_INFO0_DISCARD BIT(0) +#define FW_RX_DESC_INFO0_FORWARD BIT(1) +#define FW_RX_DESC_INFO0_INSPECT BIT(5) #define FW_RX_DESC_INFO0_EXT_MASK 0xC0 #define FW_RX_DESC_INFO0_EXT_LSB 6 -- cgit v1.2.3 From 777b4690fc4a39a753da97541de2d4fab3dc65b3 Mon Sep 17 00:00:00 2001 From: Alexei Avshalom Lazar Date: Wed, 9 May 2018 13:06:52 +0300 Subject: wil6210: disable tracing config option Disable WIL6210_TRACING by default to avoid its performance overhead. 
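With the default flipped to n, anyone who still wants the wil6210 tracepoints has to opt in explicitly; a .config fragment doing so (option names taken from the Kconfig hunk below) would look roughly like:

    # re-enable the tracepoints that are now off by default
    # (WIL6210_TRACING also depends on EVENT_TRACING being enabled)
    CONFIG_WIL6210=m
    CONFIG_WIL6210_TRACING=y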
Signed-off-by: Alexei Avshalom Lazar Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index b448926b0c0f..3548e8d5e18e 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -33,7 +33,7 @@ config WIL6210_TRACING bool "wil6210 tracing support" depends on WIL6210 depends on EVENT_TRACING - default y + default n ---help--- Say Y here to enable tracepoints for the wil6210 driver using the kernel tracing infrastructure. Select this -- cgit v1.2.3 From 8a4fa21438e38ed2db8c01a282de3995a6c0d75f Mon Sep 17 00:00:00 2001 From: Ahmad Masri Date: Wed, 9 May 2018 13:06:53 +0300 Subject: wil6210: align to latest auto generated wmi.h Align to latest version of the auto generated wmi file describing the interface with FW Signed-off-by: Ahmad Masri Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wmi.c | 6 +- drivers/net/wireless/ath/wil6210/wmi.h | 387 +++++++++++++++++++++++++++++++-- 2 files changed, 371 insertions(+), 22 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index a3dda9a97c1f..73efa13bc742 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1554,7 +1554,9 @@ int wmi_pcp_start(struct wil6210_vif *vif, .pcp_max_assoc_sta = max_assoc_sta, .hidden_ssid = hidden_ssid, .is_go = is_go, - .disable_ap_sme = disable_ap_sme, + .ap_sme_offload_mode = disable_ap_sme ? + WMI_AP_SME_OFFLOAD_PARTIAL : + WMI_AP_SME_OFFLOAD_FULL, .abft_len = wil->abft_len, }; struct { @@ -1574,7 +1576,7 @@ int wmi_pcp_start(struct wil6210_vif *vif, } if (disable_ap_sme && - !test_bit(WMI_FW_CAPABILITY_DISABLE_AP_SME, + !test_bit(WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL, wil->fw_capabilities)) { wil_err(wil, "disable_ap_sme not supported by FW\n"); return -EOPNOTSUPP; diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index d3e75f0ff245..dc503d903786 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,4 +1,5 @@ /* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 
* Copyright (c) 2006-2012 Wilocity * @@ -29,8 +30,6 @@ #ifndef __WILOCITY_WMI_H__ #define __WILOCITY_WMI_H__ -/* General */ -#define WMI_MAX_ASSOC_STA (8) #define WMI_DEFAULT_ASSOC_STA (1) #define WMI_MAC_LEN (6) #define WMI_PROX_RANGE_NUM (3) @@ -41,6 +40,19 @@ #define WMI_RF_ETYPE_LENGTH (3) #define WMI_RF_RX2TX_LENGTH (3) #define WMI_RF_ETYPE_VAL_PER_RANGE (5) +/* DTYPE configuration array size + * must always be kept equal to (WMI_RF_DTYPE_LENGTH+1) + */ +#define WMI_RF_DTYPE_CONF_LENGTH (4) +/* ETYPE configuration array size + * must always be kept equal to + * (WMI_RF_ETYPE_LENGTH+WMI_RF_ETYPE_VAL_PER_RANGE) + */ +#define WMI_RF_ETYPE_CONF_LENGTH (8) +/* RX2TX configuration array size + * must always be kept equal to (WMI_RF_RX2TX_LENGTH+1) + */ +#define WMI_RF_RX2TX_CONF_LENGTH (4) /* Mailbox interface * used for commands and events @@ -61,7 +73,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_PS_CONFIG = 1, WMI_FW_CAPABILITY_RF_SECTORS = 2, WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, - WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, + WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL = 4, WMI_FW_CAPABILITY_WMI_ONLY = 5, WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, WMI_FW_CAPABILITY_D3_SUSPEND = 8, @@ -73,6 +85,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14, WMI_FW_CAPABILITY_PNO = 15, WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18, + WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19, WMI_FW_CAPABILITY_MAX, }; @@ -164,12 +177,14 @@ enum wmi_command_id { WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID = 0x85C, WMI_RF_PWR_ON_DELAY_CMDID = 0x85D, WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID = 0x85E, + WMI_FIXED_SCHEDULING_UL_CONFIG_CMDID = 0x85F, /* Performance monitoring commands */ WMI_BF_CTRL_CMDID = 0x862, WMI_NOTIFY_REQ_CMDID = 0x863, WMI_GET_STATUS_CMDID = 0x864, WMI_GET_RF_STATUS_CMDID = 0x866, WMI_GET_BASEBAND_TYPE_CMDID = 0x867, + WMI_VRING_SWITCH_TIMING_CONFIG_CMDID = 0x868, WMI_UNIT_TEST_CMDID = 0x900, WMI_FLASH_READ_CMDID = 0x902, WMI_FLASH_WRITE_CMDID = 0x903, @@ -202,6 +217,7 @@ enum wmi_command_id { WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941, /* Read Power Save profile type */ WMI_PS_DEV_PROFILE_CFG_READ_CMDID = 0x942, + WMI_TSF_SYNC_CMDID = 0x973, WMI_TOF_SESSION_START_CMDID = 0x991, WMI_TOF_GET_CAPABILITIES_CMDID = 0x992, WMI_TOF_SET_LCR_CMDID = 0x993, @@ -218,11 +234,16 @@ enum wmi_command_id { WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5, WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7, + WMI_BF_CONTROL_CMDID = 0x9AA, WMI_SCHEDULING_SCHEME_CMDID = 0xA01, WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02, WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03, WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID = 0xA04, WMI_SET_LONG_RANGE_CONFIG_CMDID = 0xA05, + WMI_GET_ASSOC_LIST_CMDID = 0xA06, + WMI_GET_CCA_INDICATIONS_CMDID = 0xA07, + WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID = 0xA08, + WMI_INTERNAL_FW_IOCTL_CMDID = 0xA0B, WMI_SET_MAC_ADDRESS_CMDID = 0xF003, WMI_ABORT_SCAN_CMDID = 0xF007, WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041, @@ -484,6 +505,18 @@ enum wmi_rf_mgmt_type { WMI_RF_MGMT_GET_STATUS = 0x02, }; +/* WMI_BF_CONTROL_CMDID */ +enum wmi_bf_triggers { + WMI_BF_TRIGGER_RS_MCS1_TH_FAILURE = 0x01, + WMI_BF_TRIGGER_RS_MCS1_NO_BACK_FAILURE = 0x02, + WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP = 0x04, + WMI_BF_TRIGGER_MAX_BACK_FAILURE = 0x08, + WMI_BF_TRIGGER_FW = 0x10, + WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_KEEP_ALIVE = 0x20, + WMI_BF_TRIGGER_AOA = 0x40, + WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_UPM = 0x80, +}; + /* WMI_RF_MGMT_CMDID */ struct wmi_rf_mgmt_cmd { __le32 rf_mgmt_type; @@ -519,7 
+552,9 @@ struct wmi_bcon_ctrl_cmd { u8 disable_sec; u8 hidden_ssid; u8 is_go; - u8 reserved[2]; + /* A-BFT length override if non-0 */ + u8 abft_len; + u8 reserved; } __packed; /* WMI_PORT_ALLOCATE_CMDID */ @@ -584,6 +619,16 @@ struct wmi_power_mgmt_cfg_cmd { u8 reserved[3]; } __packed; +/* WMI_PCP_START_CMDID */ +enum wmi_ap_sme_offload_mode { + /* Full AP SME in FW */ + WMI_AP_SME_OFFLOAD_FULL = 0x00, + /* Probe AP SME in FW */ + WMI_AP_SME_OFFLOAD_PARTIAL = 0x01, + /* AP SME in host */ + WMI_AP_SME_OFFLOAD_NONE = 0x02, +}; + /* WMI_PCP_START_CMDID */ struct wmi_pcp_start_cmd { __le16 bcon_interval; @@ -593,7 +638,8 @@ struct wmi_pcp_start_cmd { u8 reserved0[5]; /* A-BFT length override if non-0 */ u8 abft_len; - u8 disable_ap_sme; + /* enum wmi_ap_sme_offload_mode_e */ + u8 ap_sme_offload_mode; u8 network_type; u8 channel; u8 disable_sec_offload; @@ -607,6 +653,17 @@ struct wmi_sw_tx_req_cmd { u8 payload[0]; } __packed; +/* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */ +struct wmi_vring_switch_timing_config_cmd { + /* Set vring timing configuration: + * + * defined interval for vring switch + */ + __le32 interval_usec; + /* vring inactivity threshold */ + __le32 idle_th_usec; +} __packed; + struct wmi_sw_ring_cfg { __le64 ring_mem_base; __le16 ring_size; @@ -642,6 +699,7 @@ enum wmi_vring_cfg_schd_params_priority { WMI_SCH_PRIO_HIGH = 0x01, }; +#define CIDXTID_EXTENDED_CID_TID (0xFF) #define CIDXTID_CID_POS (0) #define CIDXTID_CID_LEN (4) #define CIDXTID_CID_MSK (0xF) @@ -662,6 +720,9 @@ struct wmi_vring_cfg { struct wmi_sw_ring_cfg tx_sw_ring; /* 0-23 vrings */ u8 ringid; + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 encap_trans_type; /* 802.3 DS cfg */ @@ -671,6 +732,11 @@ struct wmi_vring_cfg { u8 to_resolution; u8 agg_max_wsize; struct wmi_vring_cfg_schd schd_params; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; enum wmi_vring_cfg_cmd_action { @@ -868,23 +934,42 @@ struct wmi_cfg_rx_chain_cmd { /* WMI_RCP_ADDBA_RESP_CMDID */ struct wmi_rcp_addba_resp_cmd { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 dialog_token; __le16 status_code; /* ieee80211_ba_parameterset field to send */ __le16 ba_param_set; __le16 ba_timeout; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_RCP_DELBA_CMDID */ struct wmi_rcp_delba_cmd { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 reserved; __le16 reason; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved2[2]; } __packed; /* WMI_RCP_ADDBA_REQ_CMDID */ struct wmi_rcp_addba_req_cmd { + /* Used for cid less than 8. 
For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 dialog_token; /* ieee80211_ba_parameterset field as it received */ @@ -892,6 +977,11 @@ struct wmi_rcp_addba_req_cmd { __le16 ba_timeout; /* ieee80211_ba_seqstrl field as it received */ __le16 ba_seq_ctrl; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_SET_MAC_ADDRESS_CMDID */ @@ -902,15 +992,20 @@ struct wmi_set_mac_address_cmd { /* WMI_ECHO_CMDID * Check FW is alive - * WMI_DEEP_ECHO_CMDID - * Check FW and ucode are alive * Returned event: WMI_ECHO_RSP_EVENTID - * same event for both commands */ struct wmi_echo_cmd { __le32 value; } __packed; +/* WMI_DEEP_ECHO_CMDID + * Check FW and ucode are alive + * Returned event: WMI_ECHO_RSP_EVENTID + */ +struct wmi_deep_echo_cmd { + __le32 value; +} __packed; + /* WMI_RF_PWR_ON_DELAY_CMDID * set FW time parameters used through RF resetting * RF reset consists of bringing its power down for a period of time, then @@ -928,7 +1023,7 @@ struct wmi_rf_pwr_on_delay_cmd { __le16 up_delay_usec; } __packed; -/* \WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID +/* WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID * This API controls the Tx and Rx gain over temperature. * It controls the Tx D-type, Rx D-type and Rx E-type amplifiers. * It also controls the Tx gain index, by controlling the Rx to Tx gain index @@ -942,25 +1037,46 @@ struct wmi_set_high_power_table_params_cmd { u8 tx_dtype_temp[WMI_RF_DTYPE_LENGTH]; u8 reserved0; /* Tx D-type values to be used for each temperature range */ - __le32 tx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + __le32 tx_dtype_conf[WMI_RF_DTYPE_CONF_LENGTH]; + /* Temperature range for Tx E-type parameters */ + u8 tx_etype_temp[WMI_RF_ETYPE_LENGTH]; + u8 reserved1; + /* Tx E-type values to be used for each temperature range. + * The last 4 values of any range are the first 4 values of the next + * range and so on + */ + __le32 tx_etype_conf[WMI_RF_ETYPE_CONF_LENGTH]; /* Temperature range for Rx D-type parameters */ u8 rx_dtype_temp[WMI_RF_DTYPE_LENGTH]; - u8 reserved1; + u8 reserved2; /* Rx D-type values to be used for each temperature range */ - __le32 rx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + __le32 rx_dtype_conf[WMI_RF_DTYPE_CONF_LENGTH]; /* Temperature range for Rx E-type parameters */ u8 rx_etype_temp[WMI_RF_ETYPE_LENGTH]; - u8 reserved2; + u8 reserved3; /* Rx E-type values to be used for each temperature range. * The last 4 values of any range are the first 4 values of the next * range and so on */ - __le32 rx_etype_conf[WMI_RF_ETYPE_VAL_PER_RANGE + WMI_RF_ETYPE_LENGTH]; + __le32 rx_etype_conf[WMI_RF_ETYPE_CONF_LENGTH]; /* Temperature range for rx_2_tx_offs parameters */ u8 rx_2_tx_temp[WMI_RF_RX2TX_LENGTH]; - u8 reserved3; + u8 reserved4; /* Rx to Tx gain index offset */ - s8 rx_2_tx_offs[WMI_RF_RX2TX_LENGTH + 1]; + s8 rx_2_tx_offs[WMI_RF_RX2TX_CONF_LENGTH]; +} __packed; + +/* WMI_FIXED_SCHEDULING_UL_CONFIG_CMDID + * This API sets rd parameter per mcs. + * Relevant only in Fixed Scheduling mode. 
+ * Returned event: WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID + */ +struct wmi_fixed_scheduling_ul_config_cmd { + /* Use mcs -1 to set for every mcs */ + s8 mcs; + /* Number of frames with rd bit set in a single virtual slot */ + u8 rd_count_per_slot; + u8 reserved[2]; } __packed; /* CMD: WMI_RF_XPM_READ_CMDID */ @@ -1267,6 +1383,93 @@ struct wmi_set_long_range_config_complete_event { u8 reserved[3]; } __packed; +/* payload max size is 236 bytes: max event buffer size (256) - WMI headers + * (16) - prev struct field size (4) + */ +#define WMI_MAX_IOCTL_PAYLOAD_SIZE (236) +#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (236) +#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (236) + +enum wmi_internal_fw_ioctl_code { + WMI_INTERNAL_FW_CODE_NONE = 0x0, + WMI_INTERNAL_FW_CODE_QCOM = 0x1, +}; + +/* WMI_INTERNAL_FW_IOCTL_CMDID */ +struct wmi_internal_fw_ioctl_cmd { + /* enum wmi_internal_fw_ioctl_code */ + __le16 code; + __le16 length; + /* payload max size is WMI_MAX_IOCTL_PAYLOAD_SIZE + * Must be the last member of the struct + */ + __le32 payload[0]; +} __packed; + +/* WMI_INTERNAL_FW_IOCTL_EVENTID */ +struct wmi_internal_fw_ioctl_event { + /* wmi_fw_status */ + u8 status; + u8 reserved; + __le16 length; + /* payload max size is WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE + * Must be the last member of the struct + */ + __le32 payload[0]; +} __packed; + +/* WMI_INTERNAL_FW_EVENT_EVENTID */ +struct wmi_internal_fw_event_event { + __le16 id; + __le16 length; + /* payload max size is WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE + * Must be the last member of the struct + */ + __le32 payload[0]; +} __packed; + +/* WMI_BF_CONTROL_CMDID */ +struct wmi_bf_control_cmd { + /* wmi_bf_triggers */ + __le32 triggers; + u8 cid; + /* DISABLED = 0, ENABLED = 1 , DRY_RUN = 2 */ + u8 txss_mode; + /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */ + u8 brp_mode; + /* Max cts threshold (correspond to + * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP) + */ + u8 bf_trigger_max_cts_failure_thr; + /* Max cts threshold in dense (correspond to + * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP) + */ + u8 bf_trigger_max_cts_failure_dense_thr; + /* Max b-ack threshold (correspond to + * WMI_BF_TRIGGER_MAX_BACK_FAILURE) + */ + u8 bf_trigger_max_back_failure_thr; + /* Max b-ack threshold in dense (correspond to + * WMI_BF_TRIGGER_MAX_BACK_FAILURE) + */ + u8 bf_trigger_max_back_failure_dense_thr; + u8 reserved0; + /* Wrong sectors threshold */ + __le32 wrong_sector_bis_thr; + /* BOOL to enable/disable long term trigger */ + u8 long_term_enable; + /* 1 = Update long term thresholds from the long_term_mbps_th_tbl and + * long_term_trig_timeout_per_mcs arrays, 0 = Ignore + */ + u8 long_term_update_thr; + /* Long term throughput threshold [Mbps] */ + u8 long_term_mbps_th_tbl[WMI_NUM_MCS]; + u8 reserved1; + /* Long term timeout threshold table [msec] */ + __le16 long_term_trig_timeout_per_mcs[WMI_NUM_MCS]; + u8 reserved2[2]; +} __packed; + /* WMI Events * List of Events (target to host) */ @@ -1325,6 +1528,7 @@ enum wmi_event_id { WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID = 0x185C, WMI_RF_PWR_ON_DELAY_RSP_EVENTID = 0x185D, WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID = 0x185E, + WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID = 0x185F, /* Performance monitoring events */ WMI_DATA_PORT_OPEN_EVENTID = 0x1860, WMI_WBE_LINK_DOWN_EVENTID = 0x1861, @@ -1334,6 +1538,7 @@ enum wmi_event_id { WMI_VRING_EN_EVENTID = 0x1865, WMI_GET_RF_STATUS_EVENTID = 0x1866, WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867, + WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868, WMI_UNIT_TEST_EVENTID = 0x1900, WMI_FLASH_READ_DONE_EVENTID 
= 0x1902, WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, @@ -1363,6 +1568,7 @@ enum wmi_event_id { WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941, /* return the Power Save profile */ WMI_PS_DEV_PROFILE_CFG_READ_EVENTID = 0x1942, + WMI_TSF_SYNC_STATUS_EVENTID = 0x1973, WMI_TOF_SESSION_END_EVENTID = 0x1991, WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992, WMI_TOF_SET_LCR_EVENTID = 0x1993, @@ -1380,17 +1586,24 @@ enum wmi_event_id { WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5, WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7, + WMI_BF_CONTROL_EVENTID = 0x19AA, WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01, WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02, WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03, WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID = 0x1A04, WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID = 0x1A05, + WMI_GET_ASSOC_LIST_RES_EVENTID = 0x1A06, + WMI_GET_CCA_INDICATIONS_EVENTID = 0x1A07, + WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID = 0x1A08, + WMI_INTERNAL_FW_EVENT_EVENTID = 0x1A0A, + WMI_INTERNAL_FW_IOCTL_EVENTID = 0x1A0B, WMI_SET_CHANNEL_EVENTID = 0x9000, WMI_ASSOC_REQ_EVENTID = 0x9001, WMI_EAPOL_RX_EVENTID = 0x9002, WMI_MAC_ADDR_RESP_EVENTID = 0x9003, WMI_FW_VER_EVENTID = 0x9004, WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005, + WMI_INTERNAL_FW_SET_CHANNEL = 0x9006, WMI_COMMAND_NOT_SUPPORTED_EVENTID = 0xFFFF, }; @@ -1462,12 +1675,16 @@ enum rf_type { RF_UNKNOWN = 0x00, RF_MARLON = 0x01, RF_SPARROW = 0x02, + RF_TALYNA1 = 0x03, + RF_TALYNA2 = 0x04, }; /* WMI_GET_RF_STATUS_EVENTID */ enum board_file_rf_type { BF_RF_MARLON = 0x00, BF_RF_SPARROW = 0x01, + BF_RF_TALYNA1 = 0x02, + BF_RF_TALYNA2 = 0x03, }; /* WMI_GET_RF_STATUS_EVENTID */ @@ -1507,6 +1724,7 @@ enum baseband_type { BASEBAND_SPARROW_M_C0 = 0x06, BASEBAND_SPARROW_M_D0 = 0x07, BASEBAND_TALYN_M_A0 = 0x08, + BASEBAND_TALYN_M_B0 = 0x09, }; /* WMI_GET_BASEBAND_TYPE_EVENTID */ @@ -1551,7 +1769,11 @@ struct wmi_ready_event { u8 numof_additional_mids; /* rfc read calibration result. 5..15 */ u8 rfc_read_calib_result; - u8 reserved[3]; + /* Max associated STAs supported by FW in AP mode (default 0 means 8 + * STA) + */ + u8 max_assoc_sta; + u8 reserved[2]; } __packed; /* WMI_NOTIFY_REQ_DONE_EVENTID */ @@ -1666,13 +1888,13 @@ enum wmi_pno_result { }; struct wmi_start_sched_scan_event { - /* pno_result */ + /* wmi_pno_result */ u8 result; u8 reserved[3]; } __packed; struct wmi_stop_sched_scan_event { - /* pno_result */ + /* wmi_pno_result */ u8 result; u8 reserved[3]; } __packed; @@ -1739,9 +1961,17 @@ struct wmi_ba_status_event { /* WMI_DELBA_EVENTID */ struct wmi_delba_event { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 from_initiator; __le16 reason; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_VRING_CFG_DONE_EVENTID */ @@ -1754,13 +1984,24 @@ struct wmi_vring_cfg_done_event { /* WMI_RCP_ADDBA_RESP_SENT_EVENTID */ struct wmi_rcp_addba_resp_sent_event { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 reserved; __le16 status; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved2[2]; } __packed; /* WMI_RCP_ADDBA_REQ_EVENTID */ struct wmi_rcp_addba_req_event { + /* Used for cid less than 8. 
For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 dialog_token; /* ieee80211_ba_parameterset as it received */ @@ -1768,6 +2009,11 @@ struct wmi_rcp_addba_req_event { __le16 ba_timeout; /* ieee80211_ba_seqstrl field as it received */ __le16 ba_seq_ctrl; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_CFG_RX_CHAIN_DONE_EVENTID */ @@ -1942,6 +2188,13 @@ struct wmi_set_high_power_table_params_event { u8 reserved[3]; } __packed; +/* WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID */ +struct wmi_fixed_scheduling_ul_config_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_TEMP_SENSE_DONE_EVENTID * * Measure MAC and radio temperatures @@ -2290,6 +2543,8 @@ struct wmi_link_maintain_cfg { __le32 bad_beacons_num_threshold; /* SNR limit for bad_beacons_detector */ __le32 bad_beacons_snr_threshold_db; + /* timeout for disassoc response frame in uSec */ + __le32 disconnect_timeout; } __packed; /* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */ @@ -2519,6 +2774,7 @@ enum wmi_tof_session_end_status { WMI_TOF_SESSION_END_FAIL = 0x01, WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02, WMI_TOF_SESSION_END_ABORTED = 0x03, + WMI_TOF_SESSION_END_BUSY = 0x04, }; /* WMI_TOF_SESSION_END_EVENTID */ @@ -2925,7 +3181,40 @@ struct wmi_set_silent_rssi_table_done_event { __le32 table; } __packed; -/* \WMI_COMMAND_NOT_SUPPORTED_EVENTID */ +/* WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID */ +struct wmi_vring_switch_timing_config_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_GET_ASSOC_LIST_RES_EVENTID */ +struct wmi_assoc_sta_info { + u8 mac[WMI_MAC_LEN]; + u8 omni_index_address; + u8 reserved; +} __packed; + +#define WMI_GET_ASSOC_LIST_SIZE (8) + +/* WMI_GET_ASSOC_LIST_RES_EVENTID + * Returns up to MAX_ASSOC_STA_LIST_SIZE associated STAs + */ +struct wmi_get_assoc_list_res_event { + struct wmi_assoc_sta_info assoc_sta_list[WMI_GET_ASSOC_LIST_SIZE]; + /* STA count */ + u8 count; + u8 reserved[3]; +} __packed; + +/* WMI_BF_CONTROL_EVENTID */ +struct wmi_bf_control_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_COMMAND_NOT_SUPPORTED_EVENTID */ struct wmi_command_not_supported_event { /* device id */ u8 mid; @@ -2936,4 +3225,62 @@ struct wmi_command_not_supported_event { __le16 reserved1; } __packed; +/* WMI_TSF_SYNC_CMDID */ +struct wmi_tsf_sync_cmd { + /* The time interval to send announce frame in one BI */ + u8 interval_ms; + /* The mcs to send announce frame */ + u8 mcs; + u8 reserved[6]; +} __packed; + +/* WMI_TSF_SYNC_STATUS_EVENTID */ +enum wmi_tsf_sync_status { + WMI_TSF_SYNC_SUCCESS = 0x00, + WMI_TSF_SYNC_FAILED = 0x01, + WMI_TSF_SYNC_REJECTED = 0x02, +}; + +/* WMI_TSF_SYNC_STATUS_EVENTID */ +struct wmi_tsf_sync_status_event { + /* enum wmi_tsf_sync_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_GET_CCA_INDICATIONS_EVENTID */ +struct wmi_get_cca_indications_event { + /* wmi_fw_status */ + u8 status; + /* CCA-Energy Detect in percentage over last BI (0..100) */ + u8 cca_ed_percent; + /* Averaged CCA-Energy Detect in percent over number of BIs (0..100) */ + u8 cca_ed_avg_percent; + /* NAV percent over last BI (0..100) */ + u8 nav_percent; + /* Averaged NAV percent over number of BIs (0..100) */ + u8 nav_avg_percent; + u8 reserved[3]; +} __packed; + +/* WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID */ +struct wmi_set_cca_indications_bi_avg_num_cmd { + /* set 
the number of bis to average cca_ed (0..255) */ + u8 bi_number; + u8 reserved[3]; +} __packed; + +/* WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID */ +struct wmi_set_cca_indications_bi_avg_num_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_INTERNAL_FW_SET_CHANNEL */ +struct wmi_internal_fw_set_channel_event { + u8 channel_num; + u8 reserved[3]; +} __packed; + #endif /* __WILOCITY_WMI_H__ */ -- cgit v1.2.3 From 9861bf3b818fbe810442b89230b80c0385ef9e04 Mon Sep 17 00:00:00 2001 From: Lior David Date: Wed, 9 May 2018 13:06:54 +0300 Subject: wil6210: fix call to wil6210_disconnect during unload Move the call to wil6210_disconnect so it will be called before unregister_netdevice. This is because it calls netif_carrier_off which is forbidden to call on an unregistered net device. Calling netif_carrier_off can add a link watch event which might be handled after net device was freed, causing a kernel oops. Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/netdev.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 05e9408e7ea3..eb6c14ed65a4 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -457,16 +457,16 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid) return; } + mutex_lock(&wil->mutex); + wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false); + mutex_unlock(&wil->mutex); + ndev = vif_to_ndev(vif); /* during unregister_netdevice cfg80211_leave may perform operations * such as stop AP, disconnect, so we only clear the VIF afterwards */ unregister_netdevice(ndev); - mutex_lock(&wil->mutex); - wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false); - mutex_unlock(&wil->mutex); - if (any_active && vif->mid != 0) wmi_port_delete(wil, vif->mid); -- cgit v1.2.3 From 5f85c7e702d2185d3d553982f916b5ac96ae77a9 Mon Sep 17 00:00:00 2001 From: Lior David Date: Wed, 9 May 2018 13:06:55 +0300 Subject: wil6210: change reply_size arg to u16 in wmi_call Change the type of the argument reply_size from u8 to u16 in order to support reply sizes > 255 bytes. The driver already supports u16 reply size in all other places, this was the only missing change. 
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wil6210.h | 2 +- drivers/net/wireless/ath/wil6210/wmi.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index f9c5155025bc..9ac92921d47c 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -986,7 +986,7 @@ int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len); void wmi_recv_cmd(struct wil6210_priv *wil); int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, - u16 reply_id, void *reply, u8 reply_size, int to_msec); + u16 reply_id, void *reply, u16 reply_size, int to_msec); void wmi_event_worker(struct work_struct *work); void wmi_event_flush(struct wil6210_priv *wil); int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 73efa13bc742..fcd95299eb4f 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1416,7 +1416,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) } int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, - u16 reply_id, void *reply, u8 reply_size, int to_msec) + u16 reply_id, void *reply, u16 reply_size, int to_msec) { int rc; unsigned long remain; -- cgit v1.2.3 From 1c21cc5fc4df4997eddc6e0b5c5bad225c3f1ecc Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Wed, 9 May 2018 13:06:56 +0300 Subject: wil6210: move WMI functionality out of wil_cfg80211_mgmt_tx Rearrange the code by having new function wmi_mgmt_tx() to take care of the WMI part of wil_cfg80211_mgmt_tx(). Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 39 +++--------------------- drivers/net/wireless/ath/wil6210/wil6210.h | 1 + drivers/net/wireless/ath/wil6210/wmi.c | 47 +++++++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 35 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index cdbb393863f3..109bae1436db 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1081,17 +1081,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, u64 *cookie) { const u8 *buf = params->buf; - size_t len = params->len, total; + size_t len = params->len; struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; - bool tx_status = false; - struct ieee80211_mgmt *mgmt_frame = (void *)buf; - struct wmi_sw_tx_req_cmd *cmd; - struct { - struct wmi_cmd_hdr wmi; - struct wmi_sw_tx_complete_event evt; - } __packed evt; + bool tx_status; /* Note, currently we do not support the "wait" parameter, user-space * must call remain_on_channel before mgmt_tx or listen on a channel @@ -1100,34 +1094,9 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, * different from currently "listened" channel and fail if it is. 
*/ - wil_dbg_misc(wil, "mgmt_tx mid %d\n", vif->mid); - wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, - len, true); - - if (len < sizeof(struct ieee80211_hdr_3addr)) - return -EINVAL; - - total = sizeof(*cmd) + len; - if (total < len) - return -EINVAL; - - cmd = kmalloc(total, GFP_KERNEL); - if (!cmd) { - rc = -ENOMEM; - goto out; - } - - memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN); - cmd->len = cpu_to_le16(len); - memcpy(cmd->payload, buf, len); - - rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total, - WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); - if (rc == 0) - tx_status = !evt.evt.status; + rc = wmi_mgmt_tx(vif, buf, len); + tx_status = (rc == 0); - kfree(cmd); - out: cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len, tx_status, GFP_KERNEL); return rc; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 9ac92921d47c..c9120dc8a435 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1150,5 +1150,6 @@ void wil6210_clear_halp(struct wil6210_priv *wil); int wmi_start_sched_scan(struct wil6210_priv *wil, struct cfg80211_sched_scan_request *request); int wmi_stop_sched_scan(struct wil6210_priv *wil); +int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len); #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index fcd95299eb4f..bf110e3b4477 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -2773,3 +2773,50 @@ int wmi_stop_sched_scan(struct wil6210_priv *wil) return 0; } + +int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len) +{ + size_t total; + struct wil6210_priv *wil = vif_to_wil(vif); + struct ieee80211_mgmt *mgmt_frame = (void *)buf; + struct wmi_sw_tx_req_cmd *cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_sw_tx_complete_event evt; + } __packed evt = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + int rc; + + wil_dbg_misc(wil, "mgmt_tx mid %d\n", vif->mid); + wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, + len, true); + + if (len < sizeof(struct ieee80211_hdr_3addr)) + return -EINVAL; + + total = sizeof(*cmd) + len; + if (total < len) { + wil_err(wil, "mgmt_tx invalid len %zu\n", len); + return -EINVAL; + } + + cmd = kmalloc(total, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN); + cmd->len = cpu_to_le16(len); + memcpy(cmd->payload, buf, len); + + rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total, + WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); + if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status); + rc = -EINVAL; + } + + kfree(cmd); + + return rc; +} -- cgit v1.2.3 From 807b086053df9bdbc9bf732130e9acda1c161aa5 Mon Sep 17 00:00:00 2001 From: Alexei Avshalom Lazar Date: Wed, 9 May 2018 13:06:57 +0300 Subject: wil6210: Initialize reply struct of the WMI commands WMI command reply saved in uninitialized struct. In order to avoid accessing unset values from FW initialize the reply struct. 
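A minimal sketch of the pattern this patch applies (illustrative only, not one of the hunks; it assumes the surrounding scope of wmi_pcp_start() with rc, wil, vif and cmd already set up, and the timeout value is arbitrary):

	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_pcp_started_event evt;
	} __packed reply = {
		/* assume failure until the FW event actually overwrites it */
		.evt = {.status = WMI_FW_STATUS_FAILURE},
	};

	rc = wmi_call(wil, WMI_PCP_START_CMDID, vif->mid, &cmd, sizeof(cmd),
		      WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		return rc;
	/* status is well defined even if the FW reply was short or missing */
	if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
		return -EINVAL;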
Signed-off-by: Alexei Avshalom Lazar Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 22 ++++--- drivers/net/wireless/ath/wil6210/debugfs.c | 2 + drivers/net/wireless/ath/wil6210/main.c | 2 + drivers/net/wireless/ath/wil6210/txrx.c | 8 ++- drivers/net/wireless/ath/wil6210/wmi.c | 97 ++++++++++++++++++----------- 5 files changed, 85 insertions(+), 46 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 109bae1436db..78946f28d0c7 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -276,6 +276,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, struct wil_net_stats *stats = &wil->sta[cid].stats; int rc; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20); if (rc) @@ -2246,7 +2248,9 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, struct { struct wmi_cmd_hdr wmi; struct wmi_get_rf_sector_params_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; struct sk_buff *msg; struct nlattr *nl_cfgs, *nl_cfg; u32 i; @@ -2292,7 +2296,6 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, cmd.sector_idx = cpu_to_le16(sector_index); cmd.sector_type = sector_type; cmd.rf_modules_vec = rf_modules_vec & 0xFF; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID, &reply, sizeof(reply), @@ -2367,7 +2370,9 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, struct { struct wmi_cmd_hdr wmi; struct wmi_set_rf_sector_params_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; struct nlattr *nl_cfg; struct wmi_rf_sector_info *si; @@ -2450,7 +2455,6 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, } cmd.rf_modules_vec = rf_modules_vec & 0xFF; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID, &reply, sizeof(reply), @@ -2474,7 +2478,9 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, struct { struct wmi_cmd_hdr wmi; struct wmi_get_selected_rf_sector_index_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; struct sk_buff *msg; if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) @@ -2514,7 +2520,6 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, memset(&cmd, 0, sizeof(cmd)); cmd.cid = (u8)cid; cmd.sector_type = sector_type; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, @@ -2555,14 +2560,15 @@ static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_set_selected_rf_sector_index_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; int rc; memset(&cmd, 0, sizeof(cmd)); cmd.sector_idx = cpu_to_le16(sector_index); cmd.sector_type = sector_type; cmd.cid = (u8)cid; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID, mid, &cmd, 
sizeof(cmd), WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 8c90b3111f0b..d3b1069ebf7a 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1078,6 +1078,8 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) struct wmi_notify_req_done_event evt; } __packed reply; + memset(&reply, 0, sizeof(reply)); + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { u32 status; diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 82aec6b06d09..e7006c2428a0 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -342,6 +342,8 @@ void wil_disconnect_worker(struct work_struct *work) /* already disconnected */ return; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, vif->mid, NULL, 0, WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), WIL6210_DISCONNECT_TO_MS); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index b60b9fcaaebd..411130a4f2ed 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -963,7 +963,9 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, struct { struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; - } __packed reply; + } __packed reply = { + .cmd = {.status = WMI_FW_STATUS_FAILURE}, + }; struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; @@ -1045,7 +1047,9 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) struct { struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; - } __packed reply; + } __packed reply = { + .cmd = {.status = WMI_FW_STATUS_FAILURE}, + }; struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index bf110e3b4477..8a9bbd6bcea8 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1509,7 +1509,9 @@ int wmi_led_cfg(struct wil6210_priv *wil, bool enable) struct { struct wmi_cmd_hdr wmi; struct wmi_led_cfg_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = cpu_to_le32(WMI_FW_STATUS_FAILURE)}, + }; if (led_id == WIL_LED_INVALID_ID) goto out; @@ -1562,7 +1564,9 @@ int wmi_pcp_start(struct wil6210_vif *vif, struct { struct wmi_cmd_hdr wmi; struct wmi_pcp_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; if (!vif->privacy) cmd.disable_sec = 1; @@ -1639,6 +1643,8 @@ int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid) } __packed reply; int len; /* reply.cmd.ssid_len in CPU order */ + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_GET_SSID_CMDID, vif->mid, NULL, 0, WMI_GET_SSID_EVENTID, &reply, sizeof(reply), 20); if (rc) @@ -1674,6 +1680,8 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel) struct wmi_set_pcp_channel_cmd cmd; } __packed reply; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, vif->mid, NULL, 0, WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20); if (rc) @@ -1699,7 +1707,9 @@ int wmi_p2p_cfg(struct wil6210_vif *vif, int channel, int bi) struct { struct wmi_cmd_hdr wmi; struct wmi_p2p_cfg_done_event evt; - } __packed reply; + } __packed reply = { + 
.evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n"); @@ -1720,7 +1730,9 @@ int wmi_start_listen(struct wil6210_vif *vif) struct { struct wmi_cmd_hdr wmi; struct wmi_listen_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n"); @@ -1742,7 +1754,9 @@ int wmi_start_search(struct wil6210_vif *vif) struct { struct wmi_cmd_hdr wmi; struct wmi_search_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n"); @@ -1868,7 +1882,9 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) struct { struct wmi_cmd_hdr wmi; struct wmi_listen_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_info(wil, "(%s)\n", on ? "on" : "off"); @@ -1910,6 +1926,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) } __packed evt; int rc; + memset(&evt, 0, sizeof(evt)); + if (wdev->iftype == NL80211_IFTYPE_MONITOR) { struct ieee80211_channel *ch = wil->monitor_chandef.chan; @@ -1939,14 +1957,14 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) if (rc) return rc; + if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) + rc = -EINVAL; + vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr); wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n", le32_to_cpu(evt.evt.status), vring->hwtail); - if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) - rc = -EINVAL; - return rc; } @@ -1964,6 +1982,8 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) struct wmi_temp_sense_done_event evt; } __packed reply; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -1996,6 +2016,7 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason); + memset(&reply, 0, sizeof(reply)); vif->locally_generated_disc = true; if (del_sta) { ether_addr_copy(del_sta_cmd.dst_mac, mac); @@ -2094,7 +2115,9 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_rcp_addba_resp_sent_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)}, + }; wil_dbg_wmi(wil, "ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n", @@ -2127,13 +2150,13 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_ps_dev_profile_cfg_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR)}, + }; u32 status; wil_dbg_wmi(wil, "Setting ps dev profile %d\n", ps_profile); - reply.evt.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR); - rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply), @@ -2162,15 +2185,15 @@ int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short) struct { struct wmi_cmd_hdr wmi; struct wmi_set_mgmt_retry_limit_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "Setting mgmt retry short %d\n", retry_short); if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities)) return -ENOTSUPP; - reply.evt.status = 
WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), @@ -2201,7 +2224,7 @@ int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short) if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities)) return -ENOTSUPP; - reply.evt.mgmt_retry_limit = 0; + memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, vif->mid, NULL, 0, WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), 100); @@ -2284,14 +2307,15 @@ int wmi_suspend(struct wil6210_priv *wil) struct { struct wmi_cmd_hdr wmi; struct wmi_traffic_suspend_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE}, + }; + u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP; wil->suspend_resp_rcvd = false; wil->suspend_resp_comp = false; - reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE; - rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply), @@ -2367,10 +2391,11 @@ int wmi_resume(struct wil6210_priv *wil) struct { struct wmi_cmd_hdr wmi; struct wmi_traffic_resume_event evt; - } __packed reply; - - reply.evt.status = WMI_TRAFFIC_RESUME_FAILED; - reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN; + } __packed reply = { + .evt = {.status = WMI_TRAFFIC_RESUME_FAILED, + .resume_triggers = + cpu_to_le32(WMI_RESUME_TRIGGER_UNKNOWN)}, + }; rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, vif->mid, NULL, 0, WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply), @@ -2396,7 +2421,9 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, struct { struct wmi_cmd_hdr wmi; struct wmi_port_allocated_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_misc(wil, "port allocate, mid %d iftype %d, mac %pM\n", mid, iftype, mac); @@ -2421,8 +2448,6 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, return -EINVAL; } - reply.evt.status = WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_PORT_ALLOCATE_CMDID, mid, &cmd, sizeof(cmd), WMI_PORT_ALLOCATED_EVENTID, &reply, @@ -2449,12 +2474,12 @@ int wmi_port_delete(struct wil6210_priv *wil, u8 mid) struct { struct wmi_cmd_hdr wmi; struct wmi_port_deleted_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_misc(wil, "port delete, mid %d\n", mid); - reply.evt.status = WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_PORT_DELETE_CMDID, mid, &cmd, sizeof(cmd), WMI_PORT_DELETED_EVENTID, &reply, @@ -2711,7 +2736,9 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_start_sched_scan_event evt; - } __packed reply; + } __packed reply = { + .evt = {.result = WMI_PNO_REJECT}, + }; if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) return -ENOTSUPP; @@ -2727,8 +2754,6 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, wmi_sched_scan_set_plans(wil, &cmd, request->scan_plans, request->n_scan_plans); - reply.evt.result = WMI_PNO_REJECT; - rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply), @@ -2752,13 +2777,13 @@ int wmi_stop_sched_scan(struct wil6210_priv *wil) struct { struct wmi_cmd_hdr wmi; struct wmi_stop_sched_scan_event evt; - } __packed reply; + } __packed reply = { + .evt = {.result = WMI_PNO_REJECT}, + }; if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) return 
-ENOTSUPP; - reply.evt.result = WMI_PNO_REJECT; - rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, vif->mid, NULL, 0, WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); -- cgit v1.2.3 From 37f8d26d8347f659e4677b4e438708ce492262bf Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Wed, 9 May 2018 13:06:58 +0300 Subject: wil6210: remove unused rx_reorder members Remove unused members from struct wil_tid_ampdu_rx Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 3 +-- drivers/net/wireless/ath/wil6210/rx_reorder.c | 7 +------ drivers/net/wireless/ath/wil6210/wil6210.h | 10 ---------- 3 files changed, 2 insertions(+), 18 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index d3b1069ebf7a..524a7d689833 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1379,8 +1379,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size; unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old; - seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout, - r->head_seq_num); + seq_printf(s, "([%2d]) 0x%03x [", r->buf_size, r->head_seq_num); for (i = 0; i < r->buf_size; i++) { if (i == index) seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|'); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 14dcb0698dee..76f8084c1fd8 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -206,7 +206,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) /* put the frame in the reordering buffer */ r->reorder_buf[index] = skb; - r->reorder_time[index] = jiffies; r->stored_mpdu_num++; wil_reorder_release(ndev, r); @@ -252,11 +251,8 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, r->reorder_buf = kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL); - r->reorder_time = - kcalloc(size, sizeof(unsigned long), GFP_KERNEL); - if (!r->reorder_buf || !r->reorder_time) { + if (!r->reorder_buf) { kfree(r->reorder_buf); - kfree(r->reorder_time); kfree(r); return NULL; } @@ -286,7 +282,6 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, kfree_skb(r->reorder_buf[i]); kfree(r->reorder_buf); - kfree(r->reorder_time); kfree(r); } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index c9120dc8a435..b623510c6f6c 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -493,38 +493,28 @@ struct pci_dev; * struct tid_ampdu_rx - TID aggregation information (Rx). * * @reorder_buf: buffer to reorder incoming aggregated MPDUs - * @reorder_time: jiffies when skb was added - * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) - * @reorder_timer: releases expired frames from the reorder buffer. * @last_rx: jiffies of last rx activity * @head_seq_num: head sequence number in reordering buffer. * @stored_mpdu_num: number of MPDUs in reordering buffer * @ssn: Starting Sequence Number expected to be aggregated. * @buf_size: buffer size for incoming A-MPDUs - * @timeout: reset timer value (in TUs). 
* @ssn_last_drop: SSN of the last dropped frame * @total: total number of processed incoming frames * @drop_dup: duplicate frames dropped for this reorder buffer * @drop_old: old frames dropped for this reorder buffer - * @dialog_token: dialog token for aggregation session * @first_time: true when this buffer used 1-st time */ struct wil_tid_ampdu_rx { struct sk_buff **reorder_buf; - unsigned long *reorder_time; - struct timer_list session_timer; - struct timer_list reorder_timer; unsigned long last_rx; u16 head_seq_num; u16 stored_mpdu_num; u16 ssn; u16 buf_size; - u16 timeout; u16 ssn_last_drop; unsigned long long total; /* frames processed */ unsigned long long drop_dup; unsigned long long drop_old; - u8 dialog_token; bool first_time; /* is it 1-st time this buffer used? */ }; -- cgit v1.2.3 From 3d6b72729cc2933906de8d2c602ae05e920b2122 Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Wed, 9 May 2018 13:06:59 +0300 Subject: wil6210: rate limit wil_rx_refill error wil_err inside wil_rx_refill can flood the log buffer. Replace it with wil_err_ratelimited. Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/txrx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 411130a4f2ed..b9a9fa828961 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -652,8 +652,8 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) v->swtail = next_tail) { rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); if (unlikely(rc)) { - wil_err(wil, "Error %d in wil_rx_refill[%d]\n", - rc, v->swtail); + wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n", + rc, v->swtail); break; } } -- cgit v1.2.3 From e691b771aa7f660c42697c15b86ada2180e68280 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 10 May 2018 05:59:40 -0700 Subject: i40evf: Fix client header define Fix up the VF client header define, since it is the same as the PF client header. Signed-off-by: Jeff Kirsher Tested-by: Andrew Bowers --- drivers/net/ethernet/intel/i40evf/i40evf_client.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h index fc6592c3de9c..5585f362048a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#ifndef _I40E_CLIENT_H_ -#define _I40E_CLIENT_H_ +#ifndef _I40EVF_CLIENT_H_ +#define _I40EVF_CLIENT_H_ #define I40EVF_CLIENT_STR_LENGTH 10 @@ -166,4 +166,4 @@ struct i40e_client { /* used by clients */ int i40evf_register_client(struct i40e_client *client); int i40evf_unregister_client(struct i40e_client *client); -#endif /* _I40E_CLIENT_H_ */ +#endif /* _I40EVF_CLIENT_H_ */ -- cgit v1.2.3 From 0ded9c61c13746991cbcc098cf00715526627e9a Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 10 May 2018 05:59:41 -0700 Subject: i40e: calculate ethtool stats size in a separate function Use a separate function to calculate the number of stats for a particular device. This helps reduce the clutter in i40e_get_sset_count(). 
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 28 +++++++++++++++++--------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index fc6a5eef141c..a62142f033d2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1658,6 +1658,23 @@ done: return err; } +static int i40e_get_stats_count(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { + if (pf->lan_veb != I40E_NO_VEB && + pf->flags & I40E_FLAG_VEB_STATS_ENABLED) + return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL; + else + return I40E_PF_STATS_LEN(netdev); + } else { + return I40E_VSI_STATS_LEN(netdev); + } +} + static int i40e_get_sset_count(struct net_device *netdev, int sset) { struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -1668,16 +1685,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_TEST: return I40E_TEST_LEN; case ETH_SS_STATS: - if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { - int len = I40E_PF_STATS_LEN(netdev); - - if ((pf->lan_veb != I40E_NO_VEB) && - (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) - len += I40E_VEB_STATS_TOTAL; - return len; - } else { - return I40E_VSI_STATS_LEN(netdev); - } + return i40e_get_stats_count(netdev); case ETH_SS_PRIV_FLAGS: return I40E_PRIV_FLAGS_STR_LEN + (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0); -- cgit v1.2.3 From 7e20188176604bf10ac396efe31ddf39d3eb8cf4 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 10 May 2018 05:59:42 -0700 Subject: i40e: remove duplicate pfc stats The pfc related priority stats are already handled separately as these stats are actually arrays of length I40E_MAX_USER_PRIORITY. Thus, including them within i40e_gstrings_stats will just duplicate data. Worse, the sizeof will be incorrect, as it will be the total size of the stat arrays, which in this case is 8 * sizeof(u64), so we will only copy the stat contents as if they were a u32. Since we already correctly handle these stats else where, remove them from the i40e_gstrings_stats. 
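The size mismatch is easiest to see against the shape of a stats table entry. The sketch below is a simplified stand-in (the real I40E_STAT() definition and the i40e_stats field names are not part of this hunk), but it shows why an array member breaks the one-u64-per-string assumption:

	/* simplified stand-in for the real I40E_STAT() entry macro */
	#define STAT_ENTRY(_type, _name, _stat) {		\
		.stat_string = (_name),				\
		.sizeof_stat = sizeof(((_type *)0)->_stat),	\
		.stat_offset = offsetof(_type, _stat),		\
	}

	/* For a scalar u64 member, sizeof_stat is sizeof(u64) and exactly one
	 * value is copied per stat string.  For an array member such as
	 *	u64 priority_xon_rx[I40E_MAX_USER_PRIORITY];
	 * sizeof_stat becomes 8 * sizeof(u64), which the copy loop does not
	 * expect, so the per-priority counters are left to the dedicated PFC
	 * stats handling instead of i40e_gstrings_stats.
	 */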
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a62142f033d2..0f237397f52b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -103,10 +103,6 @@ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), I40E_PF_STAT("link_xon_tx", stats.link_xon_tx), I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx), - I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx), - I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx), - I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx), - I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx), I40E_PF_STAT("rx_size_64", stats.rx_size_64), I40E_PF_STAT("rx_size_127", stats.rx_size_127), I40E_PF_STAT("rx_size_255", stats.rx_size_255), -- cgit v1.2.3 From 132ee00eed22e540aadbd111c2f9619c9bcc6413 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 10 May 2018 05:59:43 -0700 Subject: i40e: cleanup whitespace for some ethtool stat definitions A future patch is going to refactor some of the ethtool statistic code. To keep the patches easy to review, cleanup some of the indentation used for macro definitions first. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 0f237397f52b..e6e58e8404c5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -19,13 +19,13 @@ struct i40e_stats { } #define I40E_NETDEV_STAT(_net_stat) \ - I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) #define I40E_PF_STAT(_name, _stat) \ - I40E_STAT(struct i40e_pf, _name, _stat) + I40E_STAT(struct i40e_pf, _name, _stat) #define I40E_VSI_STAT(_name, _stat) \ - I40E_STAT(struct i40e_vsi, _name, _stat) + I40E_STAT(struct i40e_vsi, _name, _stat) #define I40E_VEB_STAT(_name, _stat) \ - I40E_STAT(struct i40e_veb, _name, _stat) + I40E_STAT(struct i40e_veb, _name, _stat) static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_packets), @@ -144,9 +144,9 @@ static const struct i40e_stats i40e_gstrings_stats[] = { * 2 /* Tx and Rx together */ \ * (sizeof(struct i40e_queue_stats) / sizeof(u64))) #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) -#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) +#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) -#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ +#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ I40E_MISC_STATS_LEN + \ I40E_QUEUE_STATS_LEN((n))) #define I40E_PFC_STATS_LEN ( \ -- cgit v1.2.3 From ca12c9d4213fe8203d58632cd8ad25b5abcc7ef2 Mon Sep 17 00:00:00 2001 From: Patryk Małek Date: Thu, 10 May 2018 05:59:44 -0700 Subject: i40e: Fix recalculation of MSI-X vectors for VMDq MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a recalculation of number of MSI-X vectors 
for VMDq in the case where we have less vectors available than we would want to reserve for VMDq. It fixes the issue where we recalculate vectors left and vectors wanted but we didn't take into account the reduced number of queue pairs per VSI. Signed-off-by: Patryk Małek Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c8659fbd7111..f17867ab9a90 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10309,21 +10309,28 @@ static int i40e_init_msix(struct i40e_pf *pf) /* any vectors left over go for VMDq support */ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { - int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; - int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); - if (!vectors_left) { pf->num_vmdq_msix = 0; pf->num_vmdq_qps = 0; } else { + int vmdq_vecs_wanted = + pf->num_vmdq_vsis * pf->num_vmdq_qps; + int vmdq_vecs = + min_t(int, vectors_left, vmdq_vecs_wanted); + /* if we're short on vectors for what's desired, we limit * the queues per vmdq. If this is still more than are * available, the user will need to change the number of * queues/vectors used by the PF later with the ethtool * channels command */ - if (vmdq_vecs < vmdq_vecs_wanted) + if (vectors_left < vmdq_vecs_wanted) { pf->num_vmdq_qps = 1; + vmdq_vecs_wanted = pf->num_vmdq_vsis; + vmdq_vecs = min_t(int, + vectors_left, + vmdq_vecs_wanted); + } pf->num_vmdq_msix = pf->num_vmdq_qps; v_budget += vmdq_vecs; -- cgit v1.2.3 From 3f76d01f3efeeafe2e9f3c74074d6eecff201466 Mon Sep 17 00:00:00 2001 From: Harshitha Ramamurthy Date: Thu, 10 May 2018 05:59:45 -0700 Subject: i40e: add tx_busy to ethtool stats This patch adds the tx_busy stat to the ethtool stats. The tx_busy stat tracks the number of times we return NETDEV_TX_BUSY to the stack during transmit. Signed-off-by: Harshitha Ramamurthy Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index e6e58e8404c5..329e59eae4a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -66,6 +66,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), + I40E_VSI_STAT("tx_busy", tx_busy), I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), }; -- cgit v1.2.3 From aa4a0654035beaf8c64eddd0b254c299af548f4d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 10 May 2018 05:59:46 -0700 Subject: i40evf: remove MAX_QUEUES and just use I40EVF_MAX_REQ_QUEUES We don't really need to have separate definitions for MAX_QUEUES and I40EVF_MAX_REQ_QUEUES, since we'll always be limited by how many queues we request anyways. If we haven't enabled requesting the maximum number of queues, there's no reason to have our call to alloc_etherdev_mq actually pass the higher value, since we'd never enable those queues anyways. 
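For illustration, a minimal sketch of the resulting allocation (the wrapper function below is hypothetical; alloc_etherdev_mq() and I40EVF_MAX_REQ_QUEUES are the names actually used in the diff that follows):

    #include <linux/etherdevice.h>

    #define I40EVF_MAX_REQ_QUEUES 4

    /* Hypothetical sketch: the netdev never needs more queues than the
     * VF is allowed to request, so that cap is the only bound worth
     * passing to the allocator.
     */
    static struct net_device *i40evf_sketch_alloc_netdev(size_t priv_size)
    {
            return alloc_etherdev_mq(priv_size, I40EVF_MAX_REQ_QUEUES);
    }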
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf.h | 1 - drivers/net/ethernet/intel/i40evf/i40evf_main.c | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 98b834932dd3..96e537a35000 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -81,7 +81,6 @@ struct i40e_vsi { #define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) #define I40E_TX_CTXTDESC(R, i) \ (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define MAX_QUEUES 16 #define I40EVF_MAX_REQ_QUEUES 4 #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 3f04a182903d..95a222d7ae43 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2331,7 +2331,7 @@ static int i40evf_validate_ch_config(struct i40evf_adapter *adapter, total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } - if (num_qps > MAX_QUEUES) + if (num_qps > I40EVF_MAX_REQ_QUEUES) return -EINVAL; ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate); @@ -3689,7 +3689,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES); + netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), + I40EVF_MAX_REQ_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; -- cgit v1.2.3 From 9c0c3b83d32ed9678bcde50743497571b61759da Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 10 May 2018 05:59:47 -0700 Subject: i40e: cleanup wording in a header comment Fix up the English in the header comment for i40e_ptp_tx_hang. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index aa3daec2049d..6706141a5ccd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -317,7 +317,7 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf) * This watchdog task is run periodically to make sure that we clear the Tx * timestamp logic if we don't obtain a timestamp in a reasonable amount of * time. It is unexpected in the normal case but if it occurs it results in - * permanently prevent timestamps of future packets + * permanently preventing timestamps of future packets. **/ void i40e_ptp_tx_hang(struct i40e_pf *pf) { -- cgit v1.2.3 From c79756cb5f084736b138da9319a02f7c72644548 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 10 May 2018 05:59:48 -0700 Subject: i40e: free the skb after clearing the bitlock In commit bbc4e7d273b5 ("i40e: fix race condition with PTP_TX_IN_PROGRESS bits") we modified the code which handles Tx timestamps so that we would clear the progress bit as soon as possible. A later commit 0bc0706b46cd ("i40e: check for Tx timestamp timeouts during watchdog") introduced similar code for detecting and handling cleanup of a blocked Tx timestamp. This code did not use the same pattern for cleaning up the skb. 
Update this code to wait to free the skb until after the bit lock is free, by first setting the ptp_tx_skb to NULL and clearing the lock. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 6706141a5ccd..d50d84927e6b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -321,6 +321,8 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf) **/ void i40e_ptp_tx_hang(struct i40e_pf *pf) { + struct sk_buff *skb; + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) return; @@ -333,9 +335,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf) * within a second it is reasonable to assume that we never will. */ if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) { - dev_kfree_skb_any(pf->ptp_tx_skb); + skb = pf->ptp_tx_skb; pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); + + /* Free the skb after we clear the bitlock */ + dev_kfree_skb_any(skb); pf->tx_hwtstamp_timeouts++; } } -- cgit v1.2.3 From 27392e57189e6e6f77e78fe9aeb3a2c7e2ccbdf4 Mon Sep 17 00:00:00 2001 From: Paweł Jabłoński Date: Thu, 10 May 2018 05:59:49 -0700 Subject: i40evf: Fix a hardware reset support in VF driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes a hardware reset support in VF driver. It is needed because when a hardware reset is detected adapter->state is in __I40EVF_RESETTING state before i40evf_reset_task is called. Without this patch unloading VF driver after a hardware reset ends with a system crash. 
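Roughly, the widened "was the interface up" check reads as below (illustrative only; the two state values appear in the diff, everything else is a simplified stand-in, not driver code):

    /* Simplified stand-in for the driver's adapter state enum. */
    enum sketch_vf_state {
            __I40EVF_RUNNING,
            __I40EVF_RESETTING,
    };

    /* The reset task must treat a VF that a hardware reset has already
     * moved to __I40EVF_RESETTING the same way as one still RUNNING,
     * otherwise the interface is never brought back down cleanly.
     */
    static bool sketch_vf_was_running(enum sketch_vf_state state)
    {
            return state == __I40EVF_RUNNING ||
                   state == __I40EVF_RESETTING;
    }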
Signed-off-by: Paweł Jabłoński Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 21 +++++++++++++++++++-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 3 ++- 2 files changed, 21 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f17867ab9a90..b5daa5c9c7de 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2840,6 +2840,23 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, return ret; } +/** + * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path + * @netdev: network interface to be adjusted + * @proto: unused protocol value + * @vid: vlan id to be added + **/ +static void i40e_vlan_rx_add_vid_up(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + if (vid >= VLAN_N_VID) + return; + set_bit(vid, vsi->active_vlans); +} + /** * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload * @netdev: network interface to be adjusted @@ -2882,8 +2899,8 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) i40e_vlan_stripping_disable(vsi); for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) - i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), - vid); + i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), + vid); } /** diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 95a222d7ae43..a7b87f935411 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1925,7 +1925,8 @@ continue_reset: * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - running = (adapter->state == __I40EVF_RUNNING); + running = ((adapter->state == __I40EVF_RUNNING) || + (adapter->state == __I40EVF_RESETTING)); if (running) { netif_carrier_off(netdev); -- cgit v1.2.3 From 1dde532dd0520a948fbc82f4522183104ee4808b Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Wed, 2 May 2018 15:17:19 +0530 Subject: cxgb4: collect hardware dump in second kernel Register callback to collect hardware/firmware dumps in second kernel before hardware/firmware is initialized. The dumps for each device will be available as elf notes in /proc/vmcore in second kernel. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4 ++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 25 ++++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h | 3 +++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10 ++++++++++ 4 files changed, 42 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 688f95440af2..01e7aad4ce5b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -50,6 +50,7 @@ #include #include #include +#include #include #include "t4_chip_type.h" #include "cxgb4_uld.h" @@ -964,6 +965,9 @@ struct adapter { struct hma_data hma; struct srq_data *srq; + + /* Dump buffer for collecting logs in kdump kernel */ + struct vmcoredd_data vmcoredd; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 143686c60234..085691eb2b95 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -488,3 +488,28 @@ void cxgb4_init_ethtool_dump(struct adapter *adapter) adapter->eth_dump.version = adapter->params.fw_vers; adapter->eth_dump.len = 0; } + +static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf) +{ + struct adapter *adap = container_of(data, struct adapter, vmcoredd); + u32 len = data->size; + + return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL); +} + +int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap) +{ + struct vmcoredd_data *data = &adap->vmcoredd; + u32 len; + + len = sizeof(struct cudbg_hdr) + + sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY; + len += CUDBG_DUMP_BUFF_SIZE; + + data->size = len; + snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s", + cxgb4_driver_name, adap->name); + data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect; + + return vmcore_add_device_dump(data); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h index ce1ac9a1c878..ef59ba1ed968 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h @@ -41,8 +41,11 @@ enum CXGB4_ETHTOOL_DUMP_FLAGS { CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */ }; +#define CXGB4_ETH_DUMP_ALL (CXGB4_ETH_DUMP_MEM | CXGB4_ETH_DUMP_HW) + u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag); int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, u32 flag); void cxgb4_init_ethtool_dump(struct adapter *adapter); +int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap); #endif /* __CXGB4_CUDBG_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c54fd189d835..07baa5e1a8c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5558,6 +5558,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_free_adapter; + if (is_kdump_kernel()) { + /* Collect hardware state and append to /proc/vmcore */ + err = cxgb4_cudbg_vmcore_add_dump(adapter); + if (err) { + dev_warn(adapter->pdev_dev, + "Fail collecting vmcore device dump, err: %d. 
Continuing\n", + err); + err = 0; + } + } if (!is_t4(adapter->params.chip)) { s_qpp = (QUEUESPERPAGEPF0_S + -- cgit v1.2.3 From 408d2debb03e15f3dd3063a5a14befd3390c6eab Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 11 May 2018 17:16:34 -0400 Subject: net: dsa: mv88e6xxx: use helper for 6390 histogram The Marvell 88E6390 model has its histogram mode bits moved in the Global 1 Control 2 register. Use the previously introduced mv88e6xxx_g1_ctl2_mask helper to set them. At the same time complete the documentation of the said register. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/global1.c | 15 +++------------ drivers/net/dsa/mv88e6xxx/global1.h | 12 +++++++++--- 2 files changed, 12 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 244ee1ff9edc..0f2b05342c18 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -393,18 +393,9 @@ int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip) int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip) { - u16 val; - int err; - - err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &val); - if (err) - return err; - - val |= MV88E6XXX_G1_CTL2_HIST_RX_TX; - - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, val); - - return err; + return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_HIST_MODE_MASK, + MV88E6390_G1_CTL2_HIST_MODE_RX | + MV88E6390_G1_CTL2_HIST_MODE_TX); } int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index) diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index e186a026e1b1..c357b3ca9a09 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -201,12 +201,13 @@ /* Offset 0x1C: Global Control 2 */ #define MV88E6XXX_G1_CTL2 0x1c -#define MV88E6XXX_G1_CTL2_HIST_RX 0x0040 -#define MV88E6XXX_G1_CTL2_HIST_TX 0x0080 -#define MV88E6XXX_G1_CTL2_HIST_RX_TX 0x00c0 #define MV88E6185_G1_CTL2_CASCADE_PORT_MASK 0xf000 #define MV88E6185_G1_CTL2_CASCADE_PORT_NONE 0xe000 #define MV88E6185_G1_CTL2_CASCADE_PORT_MULTI 0xf000 +#define MV88E6352_G1_CTL2_HEADER_TYPE_MASK 0xc000 +#define MV88E6352_G1_CTL2_HEADER_TYPE_ORIG 0x0000 +#define MV88E6352_G1_CTL2_HEADER_TYPE_MGMT 0x4000 +#define MV88E6390_G1_CTL2_HEADER_TYPE_LAG 0x8000 #define MV88E6352_G1_CTL2_RMU_MODE_MASK 0x3000 #define MV88E6352_G1_CTL2_RMU_MODE_DISABLED 0x0000 #define MV88E6352_G1_CTL2_RMU_MODE_PORT_4 0x1000 @@ -223,6 +224,11 @@ #define MV88E6390_G1_CTL2_RMU_MODE_PORT_10 0x0300 #define MV88E6390_G1_CTL2_RMU_MODE_ALL_DSA 0x0600 #define MV88E6390_G1_CTL2_RMU_MODE_DISABLED 0x0700 +#define MV88E6390_G1_CTL2_HIST_MODE_MASK 0x00c0 +#define MV88E6390_G1_CTL2_HIST_MODE_RX 0x0040 +#define MV88E6390_G1_CTL2_HIST_MODE_TX 0x0080 +#define MV88E6352_G1_CTL2_CTR_MODE_MASK 0x0060 +#define MV88E6390_G1_CTL2_CTR_MODE 0x0020 #define MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK 0x001f /* Offset 0x1D: Stats Operation Register */ -- cgit v1.2.3 From 93e18d61bfa950aaffff5fccde0f974e1e038f83 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 11 May 2018 17:16:35 -0400 Subject: net: dsa: mv88e6xxx: add IEEE and IP mapping ops All Marvell switch families except 88E6390 have direct registers in Global 1 for IEEE and IP priorities override mapping. The 88E6390 uses indirect tables instead. Add .ieee_pri_map and .ip_pri_map ops to distinct that and call them from a mv88e6xxx_pri_setup helper. Only non-6390 are concerned ATM. 
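Reduced to a sketch, the dispatch pattern looks like the following (the types below are simplified stand-ins; the real ops live in struct mv88e6xxx_ops and are invoked from mv88e6xxx_pri_setup() as shown in the diff that follows):

    /* Simplified stand-ins for illustration; only the calling pattern matters. */
    struct sketch_chip;

    struct sketch_pri_ops {
            int (*ieee_pri_map)(struct sketch_chip *chip); /* NULL for 6390 */
            int (*ip_pri_map)(struct sketch_chip *chip);   /* NULL for 6390 */
    };

    static int sketch_pri_setup(struct sketch_chip *chip,
                                const struct sketch_pri_ops *ops)
    {
            int err;

            /* Each op is optional, so the 88E6390 family, which uses
             * indirect tables instead of direct Global 1 registers,
             * simply leaves it unset for now.
             */
            if (ops->ieee_pri_map) {
                    err = ops->ieee_pri_map(chip);
                    if (err)
                            return err;
            }

            if (ops->ip_pri_map) {
                    err = ops->ip_pri_map(chip);
                    if (err)
                            return err;
            }

            return 0;
    }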
Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 94 +++++++++++++++++++++++++------------ drivers/net/dsa/mv88e6xxx/chip.h | 3 ++ drivers/net/dsa/mv88e6xxx/global1.c | 58 +++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/global1.h | 3 ++ 4 files changed, 127 insertions(+), 31 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 2910f68c0e5c..ad456d18ce8a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1104,6 +1104,25 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to update state\n", port); } +static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip) +{ + int err; + + if (chip->info->ops->ieee_pri_map) { + err = chip->info->ops->ieee_pri_map(chip); + if (err) + return err; + } + + if (chip->info->ops->ip_pri_map) { + err = chip->info->ops->ip_pri_map(chip); + if (err) + return err; + } + + return 0; +} + static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip) { int target, port; @@ -2252,37 +2271,6 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) { int err; - /* Configure the IP ToS mapping registers. */ - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff); - if (err) - return err; - - /* Configure the IEEE 802.1p priority mapping register. 
*/ - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41); - if (err) - return err; - /* Initialize the statistics unit */ err = mv88e6xxx_stats_set_histogram(chip); if (err) @@ -2365,6 +2353,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; + err = mv88e6xxx_pri_setup(chip); + if (err) + goto unlock; + /* Setup PTP Hardware Clock and timestamping */ if (chip->info->ptp_support) { err = mv88e6xxx_ptp_setup(chip); @@ -2592,6 +2584,8 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds, static const struct mv88e6xxx_ops mv88e6085_ops = { /* MV88E6XXX_FAMILY_6097 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g1_set_switch_mac, .phy_read = mv88e6185_phy_ppu_read, @@ -2628,6 +2622,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { static const struct mv88e6xxx_ops mv88e6095_ops = { /* MV88E6XXX_FAMILY_6095 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .set_switch_mac = mv88e6xxx_g1_set_switch_mac, .phy_read = mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, @@ -2652,6 +2648,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { static const struct mv88e6xxx_ops mv88e6097_ops = { /* MV88E6XXX_FAMILY_6097 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2686,6 +2684,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { static const struct mv88e6xxx_ops mv88e6123_ops = { /* MV88E6XXX_FAMILY_6165 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2714,6 +2714,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { static const struct mv88e6xxx_ops mv88e6131_ops = { /* MV88E6XXX_FAMILY_6185 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .set_switch_mac = mv88e6xxx_g1_set_switch_mac, .phy_read = mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, @@ -2747,6 +2749,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { static const struct mv88e6xxx_ops mv88e6141_ops = { /* MV88E6XXX_FAMILY_6341 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -2784,6 +2788,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { static const struct mv88e6xxx_ops mv88e6161_ops = { /* MV88E6XXX_FAMILY_6165 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2817,6 +2823,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { static const struct mv88e6xxx_ops mv88e6165_ops = { /* MV88E6XXX_FAMILY_6165 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6165_phy_read, @@ -2843,6 +2851,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { static const struct mv88e6xxx_ops mv88e6171_ops = { /* MV88E6XXX_FAMILY_6351 */ + .ieee_pri_map = 
mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2877,6 +2887,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { static const struct mv88e6xxx_ops mv88e6172_ops = { /* MV88E6XXX_FAMILY_6352 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -2916,6 +2928,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { static const struct mv88e6xxx_ops mv88e6175_ops = { /* MV88E6XXX_FAMILY_6351 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2951,6 +2965,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { static const struct mv88e6xxx_ops mv88e6176_ops = { /* MV88E6XXX_FAMILY_6352 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -2990,6 +3006,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { static const struct mv88e6xxx_ops mv88e6185_ops = { /* MV88E6XXX_FAMILY_6185 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .set_switch_mac = mv88e6xxx_g1_set_switch_mac, .phy_read = mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, @@ -3129,6 +3147,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { static const struct mv88e6xxx_ops mv88e6240_ops = { /* MV88E6XXX_FAMILY_6352 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -3208,6 +3228,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { static const struct mv88e6xxx_ops mv88e6320_ops = { /* MV88E6XXX_FAMILY_6320 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -3244,6 +3266,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { static const struct mv88e6xxx_ops mv88e6321_ops = { /* MV88E6XXX_FAMILY_6320 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -3278,6 +3302,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { static const struct mv88e6xxx_ops mv88e6341_ops = { /* MV88E6XXX_FAMILY_6341 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3316,6 +3342,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { static const struct mv88e6xxx_ops mv88e6350_ops = { /* MV88E6XXX_FAMILY_6351 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -3350,6 +3378,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops 
= { static const struct mv88e6xxx_ops mv88e6351_ops = { /* MV88E6XXX_FAMILY_6351 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -3385,6 +3415,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { static const struct mv88e6xxx_ops mv88e6352_ops = { /* MV88E6XXX_FAMILY_6352 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 8eb5dfe17f5c..012268046442 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -294,6 +294,9 @@ struct mv88e6xxx_mdio_bus { }; struct mv88e6xxx_ops { + int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); + int (*ip_pri_map)(struct mv88e6xxx_chip *chip); + /* Ingress Rate Limit unit (IRL) operations */ int (*irl_init_all)(struct mv88e6xxx_chip *chip, int port); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 0f2b05342c18..d721ccf7d8be 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -241,6 +241,64 @@ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip) return mv88e6185_g1_wait_ppu_disabled(chip); } +/* Offset 0x10: IP-PRI Mapping Register 0 + * Offset 0x11: IP-PRI Mapping Register 1 + * Offset 0x12: IP-PRI Mapping Register 2 + * Offset 0x13: IP-PRI Mapping Register 3 + * Offset 0x14: IP-PRI Mapping Register 4 + * Offset 0x15: IP-PRI Mapping Register 5 + * Offset 0x16: IP-PRI Mapping Register 6 + * Offset 0x17: IP-PRI Mapping Register 7 + */ + +int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip) +{ + int err; + + /* Reset the IP TOS/DiffServ/Traffic priorities to defaults */ + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff); + if (err) + return err; + + return 0; +} + +/* Offset 0x18: IEEE-PRI Register */ + +int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip) +{ + /* Reset the IEEE Tag priorities to defaults */ + return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41); +} + /* Offset 0x1a: Monitor Control */ /* Offset 0x1a: Monitor & MGMT Control on some devices */ diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index c357b3ca9a09..7c791c1da4b9 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -277,6 +277,9 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip); +int 
mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip); + int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port); int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip); -- cgit v1.2.3 From 447b1bb84bb51603f57ae426973a620a7a66a0db Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 11 May 2018 17:16:36 -0400 Subject: net: dsa: mv88e6xxx: add a stats setup function Now that the Global 1 specific setup function only setup the statistics unit, kill it in favor of a mv88e6xxx_stats_setup function. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index ad456d18ce8a..b23c11d9f4b2 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -995,14 +995,6 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, } -static int mv88e6xxx_stats_set_histogram(struct mv88e6xxx_chip *chip) -{ - if (chip->info->ops->stats_set_histogram) - return chip->info->ops->stats_set_histogram(chip); - - return 0; -} - static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) { return 32 * sizeof(u16); @@ -2267,14 +2259,16 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, return err; } -static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) +static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) { int err; /* Initialize the statistics unit */ - err = mv88e6xxx_stats_set_histogram(chip); - if (err) - return err; + if (chip->info->ops->stats_set_histogram) { + err = chip->info->ops->stats_set_histogram(chip); + if (err) + return err; + } return mv88e6xxx_g1_stats_clear(chip); } @@ -2300,11 +2294,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) goto unlock; } - /* Setup Switch Global 1 Registers */ - err = mv88e6xxx_g1_setup(chip); - if (err) - goto unlock; - err = mv88e6xxx_irl_setup(chip); if (err) goto unlock; @@ -2368,6 +2357,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) goto unlock; } + err = mv88e6xxx_stats_setup(chip); + if (err) + goto unlock; + unlock: mutex_unlock(&chip->reg_lock); -- cgit v1.2.3 From 25ae15fb0ecda594306937ebf9ea65b7d0c4f592 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Mon, 14 May 2018 03:14:22 +0800 Subject: net: stmmac: dwmac-sun8i: Use regmap_field for syscon register access On the Allwinner R40, the "GMAC clock" register is located in the CCU block, at a different register address than the other SoCs that have it in the "system control" block. This patch converts the use of regmap to regmap_field for mapping and accessing the syscon register, so we can have the register address in the variants data, and not in the actual register manipulation code. This patch only converts regmap_read() and regmap_write() calls to regmap_field_read() and regmap_field_write() calls. There are some places where it might make sense to switch to regmap_field_update_bits(), but this is not done here to keep the patch simple. Signed-off-by: Chen-Yu Tsai Acked-by: Maxime Ripard Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 42 +++++++++++++++++------ 1 file changed, 31 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index a3fa65b1ca8e..bbc051474806 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -42,6 +42,7 @@ * This value is used for disabling properly EMAC * and used as a good starting value in case of the * boot process(uboot) leave some stuff. + * @syscon_field reg_field for the syscon's gmac register * @soc_has_internal_phy: Does the MAC embed an internal PHY * @support_mii: Does the MAC handle MII * @support_rmii: Does the MAC handle RMII @@ -49,6 +50,7 @@ */ struct emac_variant { u32 default_syscon_value; + const struct reg_field *syscon_field; bool soc_has_internal_phy; bool support_mii; bool support_rmii; @@ -71,13 +73,21 @@ struct sunxi_priv_data { struct regulator *regulator; struct reset_control *rst_ephy; const struct emac_variant *variant; - struct regmap *regmap; + struct regmap_field *regmap_field; bool internal_phy_powered; void *mux_handle; }; +/* EMAC clock register @ 0x30 in the "system control" address range */ +static const struct reg_field sun8i_syscon_reg_field = { + .reg = 0x30, + .lsb = 0, + .msb = 31, +}; + static const struct emac_variant emac_variant_h3 = { .default_syscon_value = 0x58000, + .syscon_field = &sun8i_syscon_reg_field, .soc_has_internal_phy = true, .support_mii = true, .support_rmii = true, @@ -86,12 +96,14 @@ static const struct emac_variant emac_variant_h3 = { static const struct emac_variant emac_variant_v3s = { .default_syscon_value = 0x38000, + .syscon_field = &sun8i_syscon_reg_field, .soc_has_internal_phy = true, .support_mii = true }; static const struct emac_variant emac_variant_a83t = { .default_syscon_value = 0, + .syscon_field = &sun8i_syscon_reg_field, .soc_has_internal_phy = false, .support_mii = true, .support_rgmii = true @@ -99,6 +111,7 @@ static const struct emac_variant emac_variant_a83t = { static const struct emac_variant emac_variant_a64 = { .default_syscon_value = 0, + .syscon_field = &sun8i_syscon_reg_field, .soc_has_internal_phy = false, .support_mii = true, .support_rmii = true, @@ -216,7 +229,6 @@ static const struct emac_variant emac_variant_a64 = { #define SYSCON_ETCS_MII 0x0 #define SYSCON_ETCS_EXT_GMII 0x1 #define SYSCON_ETCS_INT_GMII 0x2 -#define SYSCON_EMAC_REG 0x30 /* sun8i_dwmac_dma_reset() - reset the EMAC * Called from stmmac via stmmac_dma_ops->reset @@ -745,7 +757,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, bool need_power_ephy = false; if (current_child ^ desired_child) { - regmap_read(gmac->regmap, SYSCON_EMAC_REG, ®); + regmap_field_read(gmac->regmap_field, ®); switch (desired_child) { case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID: dev_info(priv->device, "Switch mux to internal PHY"); @@ -763,7 +775,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, desired_child); return -EINVAL; } - regmap_write(gmac->regmap, SYSCON_EMAC_REG, val); + regmap_field_write(gmac->regmap_field, val); if (need_power_ephy) { ret = sun8i_dwmac_power_internal_phy(priv); if (ret) @@ -801,7 +813,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) int ret; u32 reg, val; - regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val); + regmap_field_read(gmac->regmap_field, &val); reg = gmac->variant->default_syscon_value; if (reg != val) 
dev_warn(priv->device, @@ -883,7 +895,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) return -EINVAL; } - regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg); + regmap_field_write(gmac->regmap_field, reg); return 0; } @@ -892,7 +904,7 @@ static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac) { u32 reg = gmac->variant->default_syscon_value; - regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg); + regmap_field_write(gmac->regmap_field, reg); } static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) @@ -980,6 +992,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) int ret; struct stmmac_priv *priv; struct net_device *ndev; + struct regmap *regmap; ret = stmmac_get_platform_resources(pdev, &stmmac_res); if (ret) @@ -1014,14 +1027,21 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) gmac->regulator = NULL; } - gmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "syscon"); - if (IS_ERR(gmac->regmap)) { - ret = PTR_ERR(gmac->regmap); + regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "syscon"); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret); return ret; } + gmac->regmap_field = devm_regmap_field_alloc(dev, regmap, + *gmac->variant->syscon_field); + if (IS_ERR(gmac->regmap_field)) { + ret = PTR_ERR(gmac->regmap_field); + dev_err(dev, "Unable to map syscon register: %d\n", ret); + return ret; + } + plat_dat->interface = of_get_phy_mode(dev->of_node); /* platform data specifying hardware features and callbacks. -- cgit v1.2.3 From 49a06cae6e7ceb77f17914c4617309e038c24c4d Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Mon, 14 May 2018 03:14:23 +0800 Subject: net: stmmac: dwmac-sun8i: Allow getting syscon regmap from external device On the Allwinner R40 SoC, the "GMAC clock" register is in the CCU address space. Using a standard syscon to access it provides no coordination with the CCU driver for register access. Neither does it prevent this and other drivers from accessing other, maybe critical, clock control registers. On other SoCs, the register is in the "system control" address space, which might also contain controls for mapping SRAM to devices or the CPU. This hardware has the same issues. Instead, for these types of setups, we let the device containing the control register create a regmap tied to it. We can then get the device from the existing syscon phandle, and retrieve the regmap with dev_get_regmap(). Signed-off-by: Chen-Yu Tsai Acked-by: Maxime Ripard Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 50 ++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index bbc051474806..79e104a20e20 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -983,6 +983,34 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) return mac; } +static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node) +{ + struct device_node *syscon_node; + struct platform_device *syscon_pdev; + struct regmap *regmap = NULL; + + syscon_node = of_parse_phandle(node, "syscon", 0); + if (!syscon_node) + return ERR_PTR(-ENODEV); + + syscon_pdev = of_find_device_by_node(syscon_node); + if (!syscon_pdev) { + /* platform device might not be probed yet */ + regmap = ERR_PTR(-EPROBE_DEFER); + goto out_put_node; + } + + /* If no regmap is found then the other device driver is at fault */ + regmap = dev_get_regmap(&syscon_pdev->dev, NULL); + if (!regmap) + regmap = ERR_PTR(-EINVAL); + + platform_device_put(syscon_pdev); +out_put_node: + of_node_put(syscon_node); + return regmap; +} + static int sun8i_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -1027,7 +1055,27 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) gmac->regulator = NULL; } - regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "syscon"); + /* The "GMAC clock control" register might be located in the + * CCU address range (on the R40), or the system control address + * range (on most other sun8i and later SoCs). + * + * The former controls most if not all clocks in the SoC. The + * latter has an SoC identification register, and on some SoCs, + * controls to map device specific SRAM to either the intended + * peripheral, or the CPU address space. + * + * In either case, there should be a coordinated and restricted + * method of accessing the register needed here. This is done by + * having the device export a custom regmap, instead of a generic + * syscon, which grants all access to all registers. + * + * To support old device trees, we fall back to using the syscon + * interface if possible. + */ + regmap = sun8i_dwmac_get_syscon_from_dev(pdev->dev.of_node); + if (IS_ERR(regmap)) + regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "syscon"); if (IS_ERR(regmap)) { ret = PTR_ERR(regmap); dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret); -- cgit v1.2.3 From 7b270b725a867587957676a8dae862caad558011 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Mon, 14 May 2018 03:14:24 +0800 Subject: net: stmmac: dwmac-sun8i: Support different ranges for TX/RX delay chains On the R40 SoC, the RX delay chain only has a range of 0~7 (hundred picoseconds), instead of 0~31. Also the TX delay chain is completely absent. This patch adds support for different ranges by adding per-compatible maximum values in the variant data. A maximum of 0 indicates that the delay chain is not supported or absent. Signed-off-by: Chen-Yu Tsai Acked-by: Maxime Ripard Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 32 ++++++++++++++++------- 1 file changed, 23 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 79e104a20e20..4f5612a3c855 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -47,6 +47,12 @@ * @support_mii: Does the MAC handle MII * @support_rmii: Does the MAC handle RMII * @support_rgmii: Does the MAC handle RGMII + * + * @rx_delay_max: Maximum raw value for RX delay chain + * @tx_delay_max: Maximum raw value for TX delay chain + * These two also indicate the bitmask for + * the RX and TX delay chain registers. A + * value of zero indicates this is not supported. */ struct emac_variant { u32 default_syscon_value; @@ -55,6 +61,8 @@ struct emac_variant { bool support_mii; bool support_rmii; bool support_rgmii; + u8 rx_delay_max; + u8 tx_delay_max; }; /* struct sunxi_priv_data - hold all sunxi private data @@ -91,7 +99,9 @@ static const struct emac_variant emac_variant_h3 = { .soc_has_internal_phy = true, .support_mii = true, .support_rmii = true, - .support_rgmii = true + .support_rgmii = true, + .rx_delay_max = 31, + .tx_delay_max = 7, }; static const struct emac_variant emac_variant_v3s = { @@ -106,7 +116,9 @@ static const struct emac_variant emac_variant_a83t = { .syscon_field = &sun8i_syscon_reg_field, .soc_has_internal_phy = false, .support_mii = true, - .support_rgmii = true + .support_rgmii = true, + .rx_delay_max = 31, + .tx_delay_max = 7, }; static const struct emac_variant emac_variant_a64 = { @@ -115,7 +127,9 @@ static const struct emac_variant emac_variant_a64 = { .soc_has_internal_phy = false, .support_mii = true, .support_rmii = true, - .support_rgmii = true + .support_rgmii = true, + .rx_delay_max = 31, + .tx_delay_max = 7, }; #define EMAC_BASIC_CTL0 0x00 @@ -219,9 +233,7 @@ static const struct emac_variant emac_variant_a64 = { #define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */ /* Generic system control EMAC_CLK bits */ -#define SYSCON_ETXDC_MASK GENMASK(2, 0) #define SYSCON_ETXDC_SHIFT 10 -#define SYSCON_ERXDC_MASK GENMASK(4, 0) #define SYSCON_ERXDC_SHIFT 5 /* EMAC PHY Interface Type */ #define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */ @@ -847,8 +859,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) } val /= 100; dev_dbg(priv->device, "set tx-delay to %x\n", val); - if (val <= SYSCON_ETXDC_MASK) { - reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT); + if (val <= gmac->variant->tx_delay_max) { + reg &= ~(gmac->variant->tx_delay_max << + SYSCON_ETXDC_SHIFT); reg |= (val << SYSCON_ETXDC_SHIFT); } else { dev_err(priv->device, "Invalid TX clock delay: %d\n", @@ -864,8 +877,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) } val /= 100; dev_dbg(priv->device, "set rx-delay to %x\n", val); - if (val <= SYSCON_ERXDC_MASK) { - reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT); + if (val <= gmac->variant->rx_delay_max) { + reg &= ~(gmac->variant->rx_delay_max << + SYSCON_ERXDC_SHIFT); reg |= (val << SYSCON_ERXDC_SHIFT); } else { dev_err(priv->device, "Invalid RX clock delay: %d\n", -- cgit v1.2.3 From 9bf5085aaaadd6488a0ea6121e0cb7dadaf0652a Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Mon, 14 May 2018 03:14:25 +0800 Subject: net: stmmac: dwmac-sun8i: Add support for GMAC on Allwinner R40 SoC The Allwinner R40 SoC has the EMAC controller supported by dwmac-sun8i. 
It is named "GMAC", while EMAC refers to the 10/100 Mbps Ethernet controller supported by sun4i-emac. The controller is the same, but the R40 has the glue layer controls in the clock control unit (CCU), with a reduced RX delay chain, and no TX delay chain. This patch adds support for it using the framework laid out by previous patches to map the differences. Signed-off-by: Chen-Yu Tsai Acked-by: Maxime Ripard Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 4f5612a3c855..2f7f0915f071 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -93,6 +93,13 @@ static const struct reg_field sun8i_syscon_reg_field = { .msb = 31, }; +/* EMAC clock register @ 0x164 in the CCU address range */ +static const struct reg_field sun8i_ccu_reg_field = { + .reg = 0x164, + .lsb = 0, + .msb = 31, +}; + static const struct emac_variant emac_variant_h3 = { .default_syscon_value = 0x58000, .syscon_field = &sun8i_syscon_reg_field, @@ -121,6 +128,14 @@ static const struct emac_variant emac_variant_a83t = { .tx_delay_max = 7, }; +static const struct emac_variant emac_variant_r40 = { + .default_syscon_value = 0, + .syscon_field = &sun8i_ccu_reg_field, + .support_mii = true, + .support_rgmii = true, + .rx_delay_max = 7, +}; + static const struct emac_variant emac_variant_a64 = { .default_syscon_value = 0, .syscon_field = &sun8i_syscon_reg_field, @@ -1160,6 +1175,8 @@ static const struct of_device_id sun8i_dwmac_match[] = { .data = &emac_variant_v3s }, { .compatible = "allwinner,sun8i-a83t-emac", .data = &emac_variant_a83t }, + { .compatible = "allwinner,sun8i-r40-gmac", + .data = &emac_variant_r40 }, { .compatible = "allwinner,sun50i-a64-emac", .data = &emac_variant_a64 }, { } -- cgit v1.2.3 From bde4c563a93f643cee9854b0d354710e1ff69284 Mon Sep 17 00:00:00 2001 From: Hernán Gonzalez Date: Sun, 13 May 2018 20:33:49 -0300 Subject: net: ethernet: ti: Use ERR_CAST instead of ERR_PTR(PTR_ERR()) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use ERR_CAST inlined function instead of ERR_PTR(PTR_ERR(...)). drivers/net/ethernet/ti/cpts.c:567:9-16: WARNING: ERR_CAST can be used with cpts->refclk Generated by: scripts/coccinelle/api/err_cast.cocci Signed-off-by: Hernán Gonzalez Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpts.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index e7b76f6b4f67..7842f094f2ef 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -564,7 +564,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs, cpts->refclk = devm_clk_get(dev, "cpts"); if (IS_ERR(cpts->refclk)) { dev_err(dev, "Failed to get cpts refclk\n"); - return ERR_PTR(PTR_ERR(cpts->refclk)); + return ERR_CAST(cpts->refclk); } clk_prepare(cpts->refclk); -- cgit v1.2.3 From 55c0211dcb5d33955ed977de81373cef91de665b Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 14 May 2018 09:40:44 +0300 Subject: mlxsw: spectrum_span: Support LAG under mirror-to-gretap When resolving a path that the packet will take after being encapsulated in mirror-to-gretap scenarios, one of the devices en route could be a LAG. 
In that case, mirror to first up slave that corresponds to a front panel port. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index e5f4f7620ab7..da3f7f527360 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -245,6 +245,19 @@ mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev, return vlan_dev_real_dev(vlan_dev); } +static struct net_device * +mlxsw_sp_span_entry_lag(struct net_device *lag_dev) +{ + struct net_device *dev; + struct list_head *iter; + + netdev_for_each_lower_dev(lag_dev, dev, iter) + if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev)) + return dev; + + return NULL; +} + static __maybe_unused int mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev, union mlxsw_sp_l3addr saddr, @@ -278,6 +291,14 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev, edev = mlxsw_sp_span_entry_vlan(edev, &vid); } + if (netif_is_lag_master(edev)) { + if (!(edev->flags & IFF_UP)) + goto unoffloadable; + edev = mlxsw_sp_span_entry_lag(edev); + if (!edev) + goto unoffloadable; + } + if (!mlxsw_sp_port_dev_check(edev)) goto unoffloadable; -- cgit v1.2.3 From 7cfac881660ac7b7229950e0a942201be63c15d1 Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Mon, 14 May 2018 13:24:43 +0530 Subject: cxgb4: do not fail vf instatiation in slave mode We no longer require a check for cxgb4 to be MASTER when configuring SRIOV, It was required when we had module parameter to instantiate vf. Signed-off-by: Arjun Vynipadath Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 07baa5e1a8c5..130d1eed7993 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5276,13 +5276,9 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) u32 pcie_fw; pcie_fw = readl(adap->regs + PCIE_FW_A); - /* Check if cxgb4 is the MASTER and fw is initialized */ - if (num_vfs && - (!(pcie_fw & PCIE_FW_INIT_F) || - !(pcie_fw & PCIE_FW_MASTER_VLD_F) || - PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) { - dev_warn(&pdev->dev, - "cxgb4 driver needs to be MASTER to support SRIOV\n"); + /* Check if fw is initialized */ + if (!(pcie_fw & PCIE_FW_INIT_F)) { + dev_warn(&pdev->dev, "Device not initialized\n"); return -EOPNOTSUPP; } -- cgit v1.2.3 From b8c931ba3c739b49bd536d35851712d838857757 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 7 Sep 2017 22:30:58 +0300 Subject: net/mlx5e: Remove redundant vport context vlan update In delete vlan flow an extra call to mlx5e_vport_context_update_vlans was added by mistake, remove it. 
Fixes: 86d722ad2c3b ("net/mlx5: Use flow steering infrastructure for mlx5_en") Signed-off-by: Saeed Mahameed Reviewed-by: Gal Pressman --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index f64dda2bed31..76cc10e44080 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -277,7 +277,6 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, } break; case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID: - mlx5e_vport_context_update_vlans(priv); if (priv->fs.vlan.active_cvlans_rule[vid]) { mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]); priv->fs.vlan.active_cvlans_rule[vid] = NULL; -- cgit v1.2.3 From e36d4810f2e98110e2b0de11ffc60f71a0a26805 Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 13 Nov 2017 18:07:50 +0200 Subject: net/mlx5e: Skip redundant checks when providing NUD lastuse feedback It's redundant to continue the loop if we found one flow whose lastuse value being newer than the last one we reported, since this is enough for us to trigger a NUD update (neigh_event_send()). Signed-off-by: Roi Dayan Reviewed-by: Paul Blakey Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b94276db3ce9..9b0e3ccffa66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -982,6 +982,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) } } } + if (neigh_used) + break; } if (neigh_used) { -- cgit v1.2.3 From bd206fd52e039870027d23d6de9f432272fc61d5 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 23 Nov 2017 14:29:25 +0200 Subject: net/mlx5e: Use u8 instead of int for LRO number of segments Range of LRO number of segments fits in u8. Also, bring initialization and declaration together to save code. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 7bbf0db27a01..e10422b845ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -681,11 +681,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, struct sk_buff *skb) { + u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; struct net_device *netdev = rq->netdev; - int lro_num_seg; skb->mac_len = ETH_HLEN; - lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); -- cgit v1.2.3 From efb6d7a20c856e8714d17f5a4ce509df893c4f01 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 23 Nov 2017 14:39:22 +0200 Subject: net/mlx5e: Use bool as return type for mlx5e_xdp_handle Function returns boolean values, use bool instead of int. 
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e10422b845ca..abc6ca8168d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -807,9 +807,9 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, } /* returns true if packet was consumed by xdp */ -static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, - struct mlx5e_dma_info *di, - void *va, u16 *rx_headroom, u32 *len) +static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, + struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len) { struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); struct xdp_buff xdp; -- cgit v1.2.3 From 4601266095501d2af4a877f6ee0ecf18af8cdcd8 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 8 Apr 2018 09:45:32 +0300 Subject: net/mlx5e: Remove double defined DMAC header re-write element The firmware DMAC_47_16 header re-write token was defined twice, clean it up. Signed-off-by: Or Gerlitz Reported-by: Jianbo Liu Reviewed-by: Jianbo Liu Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9b0e3ccffa66..969a1768da71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1579,7 +1579,6 @@ struct mlx5_fields { {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)} static struct mlx5_fields fields[] = { - OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0), OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0), OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0), OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0), -- cgit v1.2.3 From b3a433de7b421d700de99ac281d8384494020cd2 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 8 Apr 2018 09:59:22 +0300 Subject: net/mlx5e: Clean static checker complaints on TC offload and VF reps code Clean warning/check complaints made by checkpatch on en_{tc,rep}.c Signed-off-by: Or Gerlitz Reviewed-by: Jianbo Liu Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 8 ++++---- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 11 +++++------ 2 files changed, 9 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 876c3e4c6193..a689f4c90fe3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -681,8 +681,8 @@ static int mlx5e_rep_open(struct net_device *dev) goto unlock; if (!mlx5_modify_vport_admin_state(priv->mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP)) + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev); unlock: @@ -699,8 +699,8 @@ static int mlx5e_rep_close(struct net_device *dev) mutex_lock(&priv->state_lock); mlx5_modify_vport_admin_state(priv->mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); ret = 
mlx5e_close_locked(dev); mutex_unlock(&priv->state_lock); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 969a1768da71..85ad0948951c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -97,7 +97,7 @@ enum { }; #define MLX5E_TC_TABLE_NUM_GROUPS 4 -#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16) +#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16) struct mlx5e_hairpin { struct mlx5_hairpin *pair; @@ -789,7 +789,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, mlx5_del_flow_rules(flow->rule); mlx5_fc_destroy(priv->mdev, counter); - if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { + if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; } @@ -1765,12 +1765,12 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, err = -EOPNOTSUPP; /* can't be all optimistic */ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) { - printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n"); + netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n"); goto out_err; } if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) { - printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd); + netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd); goto out_err; } @@ -1794,8 +1794,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { cmd_masks = &masks[cmd]; if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { - printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n", - cmd); + netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd); print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS, 16, 1, cmd_masks, sizeof(zero_masks), true); err = -EOPNOTSUPP; -- cgit v1.2.3 From c180f675349de832030eaa03badbac6f347f487a Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 5 Apr 2018 14:46:02 +0300 Subject: net/mlx5e: Avoid redundant zeroing of offloaded TC flow attributes This is not needed as the attributes are zeroed out on allocation. Signed-off-by: Or Gerlitz Reviewed-by: Jianbo Liu Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 85ad0948951c..7d88e813bad1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1923,7 +1923,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EINVAL; attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - attr->action = 0; tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { @@ -2464,7 +2463,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (!tcf_exts_has_actions(exts)) return -EINVAL; - memset(attr, 0, sizeof(*attr)); attr->in_rep = rpriv->rep; tcf_exts_to_list(exts, &actions); -- cgit v1.2.3 From 31c8eba5e860f5f35918b2f72c45e689805d1c32 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 5 Apr 2018 14:55:39 +0300 Subject: net/mlx5e: Return success when TC offloaded fdb actions parsed ok Reaching here, means we didn't err anywhere, so lets just return success. 
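The "redundant zeroing" cleanup above relies on the allocator already returning zero-filled memory. The same point in plain userspace C, where calloc() plays the role of kzalloc():

#include <stdio.h>
#include <stdlib.h>

struct flow_attr {
	unsigned int action;
	unsigned int flow_tag;
};

int main(void)
{
	/* calloc(), like kzalloc(), hands back zero-filled memory ... */
	struct flow_attr *attr = calloc(1, sizeof(*attr));

	if (!attr)
		return 1;

	/* ... so an explicit memset(attr, 0, sizeof(*attr)) or
	 * attr->action = 0 here would only repeat work the allocator
	 * already did. */
	printf("action=%u flow_tag=%u\n", attr->action, attr->flow_tag);
	free(attr);
	return 0;
}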
Signed-off-by: Or Gerlitz Reviewed-by: Jianbo Liu Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7d88e813bad1..c15bd7b6a840 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2458,7 +2458,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, const struct tc_action *a; LIST_HEAD(actions); bool encap = false; - int err = 0; if (!tcf_exts_has_actions(exts)) return -EINVAL; @@ -2474,6 +2473,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } if (is_tcf_pedit(a)) { + int err; + err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, parse_attr); if (err) @@ -2561,7 +2562,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (!actions_match_supported(priv, exts, parse_attr, flow)) return -EOPNOTSUPP; - return err; + return 0; } int mlx5e_configure_flower(struct mlx5e_priv *priv, -- cgit v1.2.3 From 1cab1cd74bee68aea786e075e979921f686f88a2 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 5 Apr 2018 14:58:25 +0300 Subject: net/mlx5e: Use local actions var while processing offloaded TC flow actions Use local actions variable while parsing the actions of offloaded TC flow. Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 44 ++++++++++++++----------- 1 file changed, 24 insertions(+), 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index c15bd7b6a840..24b6ca205c74 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1917,6 +1917,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5_nic_flow_attr *attr = flow->nic_attr; const struct tc_action *a; LIST_HEAD(actions); + u32 action = 0; int err; if (!tcf_exts_has_actions(exts)) @@ -1927,10 +1928,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { if (is_tcf_gact_shot(a)) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; + action |= MLX5_FLOW_CONTEXT_ACTION_DROP; if (MLX5_CAP_FLOWTABLE(priv->mdev, flow_table_properties_nic_receive.flow_counter)) - attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; continue; } @@ -1940,13 +1941,13 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (err) return err; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; continue; } if (is_tcf_csum(a)) { - if (csum_offload_supported(priv, attr->action, + if (csum_offload_supported(priv, action, tcf_csum_update_flags(a))) continue; @@ -1960,8 +1961,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, same_hw_devs(priv, netdev_priv(peer_dev))) { parse_attr->mirred_ifindex = peer_dev->ifindex; flow->flags |= MLX5E_TC_FLOW_HAIRPIN; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; } else { netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", peer_dev->name); @@ -1980,13 +1981,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } attr->flow_tag = mark; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; continue; } return -EINVAL; } + attr->action = action; if (!actions_match_supported(priv, exts, parse_attr, flow)) return -EOPNOTSUPP; @@ -2458,6 +2460,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, const struct tc_action *a; LIST_HEAD(actions); bool encap = false; + u32 action = 0; if (!tcf_exts_has_actions(exts)) return -EINVAL; @@ -2467,8 +2470,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { if (is_tcf_gact_shot(a)) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; continue; } @@ -2480,12 +2483,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (err) return err; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; continue; } if (is_tcf_csum(a)) { - if (csum_offload_supported(priv, attr->action, + if (csum_offload_supported(priv, action, tcf_csum_update_flags(a))) continue; @@ -2500,8 +2503,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (switchdev_port_same_parent_id(priv->netdev, out_dev)) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; out_priv = netdev_priv(out_dev); rpriv = out_priv->ppriv; attr->out_rep = rpriv->rep; @@ -2509,9 +2512,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, parse_attr->mirred_ifindex = out_dev->ifindex; parse_attr->tun_info = *info; attr->parse_attr = parse_attr; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; /* attr->out_rep is resolved when we handle encap */ } else { pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", @@ -2532,9 +2535,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_vlan(a)) { if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; + action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; attr->vlan_vid = tcf_vlan_push_vid(a); if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) { attr->vlan_prio = tcf_vlan_push_prio(a); @@ -2552,13 +2555,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } if (is_tcf_tunnel_release(a)) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; + action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; continue; } return -EINVAL; } + attr->action = action; if (!actions_match_supported(priv, exts, parse_attr, flow)) return -EOPNOTSUPP; -- cgit v1.2.3 From 547829004c98941f73d010c87c2111e29a6c03ae Mon Sep 17 00:00:00 2001 From: 
Or Gerlitz Date: Thu, 5 Apr 2018 15:33:52 +0300 Subject: net/mlx5e: Properly order min inline mode setup while parsing TC matches Set the initial value to none instead of L2, and set to L2 where the previous initial value was assumed. Make sure to parse L2 matches before L3 matches and L3 before L4. This is a pre-step to get the match level for more purposes other than the validating the needed vs. actual inline level. Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 129 ++++++++++++++---------- 1 file changed, 76 insertions(+), 53 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 24b6ca205c74..78f5c47affb4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1201,7 +1201,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, u16 addr_type = 0; u8 ip_proto = 0; - *min_inline = MLX5_INLINE_MODE_L2; + *min_inline = MLX5_INLINE_MODE_NONE; if (f->dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | @@ -1251,58 +1251,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, inner_headers); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); - - struct flow_dissector_key_control *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->mask); - addr_type = key->addr_type; - - /* the HW doesn't support frag first/later */ - if (mask->flags & FLOW_DIS_FIRST_FRAG) - return -EOPNOTSUPP; - - if (mask->flags & FLOW_DIS_IS_FRAGMENT) { - MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, - key->flags & FLOW_DIS_IS_FRAGMENT); - - /* the HW doesn't need L3 inline to match on frag=no */ - if (key->flags & FLOW_DIS_IS_FRAGMENT) - *min_inline = MLX5_INLINE_MODE_IP; - } - } - - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); - ip_proto = key->ip_proto; - - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, - ntohs(mask->n_proto)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - ntohs(key->n_proto)); - - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, - mask->ip_proto); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, - key->ip_proto); - - if (mask->ip_proto) - *min_inline = MLX5_INLINE_MODE_IP; - } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_dissector_key_eth_addrs *key = skb_flow_dissector_target(f->dissector, @@ -1326,6 +1274,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16), key->src); + + if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) + *min_inline = MLX5_INLINE_MODE_L2; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { @@ -1346,9 +1297,79 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); + + *min_inline = 
MLX5_INLINE_MODE_L2; } } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, + ntohs(mask->n_proto)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + ntohs(key->n_proto)); + + if (mask->n_proto) + *min_inline = MLX5_INLINE_MODE_L2; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->key); + + struct flow_dissector_key_control *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->mask); + addr_type = key->addr_type; + + /* the HW doesn't support frag first/later */ + if (mask->flags & FLOW_DIS_FIRST_FRAG) + return -EOPNOTSUPP; + + if (mask->flags & FLOW_DIS_IS_FRAGMENT) { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, + key->flags & FLOW_DIS_IS_FRAGMENT); + + /* the HW doesn't need L3 inline to match on frag=no */ + if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) + *min_inline = MLX5_INLINE_MODE_L2; + /* *** L2 attributes parsing up to here *** */ + else + *min_inline = MLX5_INLINE_MODE_IP; + } + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + ip_proto = key->ip_proto; + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, + mask->ip_proto); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + key->ip_proto); + + if (mask->ip_proto) + *min_inline = MLX5_INLINE_MODE_IP; + } + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_dissector_key_ipv4_addrs *key = skb_flow_dissector_target(f->dissector, @@ -1433,6 +1454,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, *min_inline = MLX5_INLINE_MODE_IP; } + /* *** L3 attributes parsing up to here *** */ + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_dissector_key_ports *key = skb_flow_dissector_target(f->dissector, -- cgit v1.2.3 From d708f902989b844907c5f7720abe67812a256c33 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 5 Apr 2018 15:42:06 +0300 Subject: net/mlx5e: Get the required HW match level while parsing TC flow matches Introduce levels of matching on headers of offloaded flows (none, L2, L3, L4) that follow the inline mode levels. This is pre-step for us to offload flows without any matches on headers. 
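The parse-ordering requirement can be pictured with a tiny sketch (simplified, not the driver code): the level is assigned as each key is parsed, so keys must be visited shallow-to-deep or a later L2 key would overwrite a deeper level that was already recorded.

#include <stdio.h>

enum match_level { MATCH_NONE, MATCH_L2, MATCH_L3, MATCH_L4 };

int main(void)
{
	/* Pretend the classifier matches on destination MAC and IP protocol
	 * but not on L4 ports. */
	int match_eth_dst = 1, match_ip_proto = 1, match_tcp_dport = 0;
	enum match_level level = MATCH_NONE;

	/* Keys are visited shallow-to-deep, so the last assignment always
	 * reflects the deepest header actually matched; visiting an L2 key
	 * after an L3 key would clobber the deeper level, which is why the
	 * patch reorders the parsing. */
	if (match_eth_dst)
		level = MATCH_L2;
	if (match_ip_proto)
		level = MATCH_L3;
	if (match_tcp_dport)
		level = MATCH_L4;

	printf("required match level: %d\n", level);	/* 2 == MATCH_L3 */
	return 0;
}

As the follow-up patch below shows, the levels are defined to coincide with the inline-mode values, so the result can be compared directly against the eswitch's configured inline mode.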
Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 34 +++++++++++------------ drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7 +++++ 2 files changed, 24 insertions(+), 17 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 78f5c47affb4..e84bcea8b071 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1192,7 +1192,7 @@ vxlan_match_offload_err: static int __parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, - u8 *min_inline) + u8 *match_level) { void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); @@ -1201,7 +1201,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, u16 addr_type = 0; u8 ip_proto = 0; - *min_inline = MLX5_INLINE_MODE_NONE; + *match_level = MLX5_MATCH_NONE; if (f->dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | @@ -1276,7 +1276,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, key->src); if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) - *min_inline = MLX5_INLINE_MODE_L2; + *match_level = MLX5_MATCH_L2; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { @@ -1298,7 +1298,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); - *min_inline = MLX5_INLINE_MODE_L2; + *match_level = MLX5_MATCH_L2; } } @@ -1317,7 +1317,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, ntohs(key->n_proto)); if (mask->n_proto) - *min_inline = MLX5_INLINE_MODE_L2; + *match_level = MLX5_MATCH_L2; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { @@ -1343,10 +1343,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, /* the HW doesn't need L3 inline to match on frag=no */ if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) - *min_inline = MLX5_INLINE_MODE_L2; + *match_level = MLX5_INLINE_MODE_L2; /* *** L2 attributes parsing up to here *** */ else - *min_inline = MLX5_INLINE_MODE_IP; + *match_level = MLX5_INLINE_MODE_IP; } } @@ -1367,7 +1367,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, key->ip_proto); if (mask->ip_proto) - *min_inline = MLX5_INLINE_MODE_IP; + *match_level = MLX5_MATCH_L3; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -1394,7 +1394,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, &key->dst, sizeof(key->dst)); if (mask->src || mask->dst) - *min_inline = MLX5_INLINE_MODE_IP; + *match_level = MLX5_MATCH_L3; } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { @@ -1423,7 +1423,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY || ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY) - *min_inline = MLX5_INLINE_MODE_IP; + *match_level = MLX5_MATCH_L3; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) { @@ -1451,7 +1451,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, return -EOPNOTSUPP; if (mask->tos || mask->ttl) - *min_inline = MLX5_INLINE_MODE_IP; + *match_level = MLX5_MATCH_L3; } /* *** L3 attributes parsing up to here *** */ @@ -1496,7 +1496,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } if (mask->src || mask->dst) - *min_inline = MLX5_INLINE_MODE_TCP_UDP; + *match_level = 
MLX5_MATCH_L4; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) { @@ -1515,7 +1515,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, ntohs(key->flags)); if (mask->flags) - *min_inline = MLX5_INLINE_MODE_TCP_UDP; + *match_level = MLX5_MATCH_L4; } return 0; @@ -1530,19 +1530,19 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep; - u8 min_inline; + u8 match_level; int err; - err = __parse_cls_flower(priv, spec, f, &min_inline); + err = __parse_cls_flower(priv, spec, f, &match_level); if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { rep = rpriv->rep; if (rep->vport != FDB_UPLINK_VPORT && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && - esw->offloads.inline_mode < min_inline)) { + esw->offloads.inline_mode < match_level)) { netdev_warn(priv->netdev, "Flow is not offloaded due to min inline setting, required %d actual %d\n", - min_inline, esw->offloads.inline_mode); + match_level, esw->offloads.inline_mode); return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 4cd773fa55e3..efae77dd1e35 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -227,6 +227,13 @@ enum { SET_VLAN_INSERT = BIT(1) }; +enum mlx5_flow_match_level { + MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE, + MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2, + MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP, + MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP, +}; + struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; struct mlx5_eswitch_rep *out_rep; -- cgit v1.2.3 From 38aa51c134b56b7ea61bea79b428c5fbcd95f285 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 5 Apr 2018 15:50:36 +0300 Subject: net/mlx5e: Support offloaded TC flows with no matches on headers For example: tc filter add dev ens2f0_0 parent ffff: flower skip_sw action drop Note that for eswitch flows, we still always match on the source port. 
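A rough sketch of the criteria selection this enables (flag values are illustrative, not the firmware encoding): an eswitch rule always matches the source port via the misc parameters, and outer headers are only enabled when the flow actually matches on packet headers.

#include <stdio.h>

#define MATCH_OUTER_HEADERS	(1u << 0)
#define MATCH_MISC_PARAMETERS	(1u << 1)

enum match_level { MATCH_NONE, MATCH_L2, MATCH_L3, MATCH_L4 };

static unsigned int criteria_enable(enum match_level level)
{
	unsigned int crit = MATCH_MISC_PARAMETERS;	/* source port, always */

	if (level != MATCH_NONE)
		crit |= MATCH_OUTER_HEADERS;	/* only if headers are matched */
	return crit;
}

int main(void)
{
	printf("drop-all rule:     0x%x\n", criteria_enable(MATCH_NONE));
	printf("header-match rule: 0x%x\n", criteria_enable(MATCH_L3));
	return 0;
}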
Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 10 +++++++++- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 8 ++++++-- 3 files changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index e84bcea8b071..1dc24e3a0841 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -58,6 +58,7 @@ struct mlx5_nic_flow_attr { u32 flow_tag; u32 mod_hdr_id; u32 hairpin_tirn; + u8 match_level; struct mlx5_flow_table *hairpin_ft; }; @@ -753,7 +754,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, table_created = true; } - parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + if (attr->match_level != MLX5_MATCH_NONE) + parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, &flow_act, dest, dest_ix); @@ -1547,6 +1550,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv, } } + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + flow->esw_attr->match_level = match_level; + else + flow->nic_attr->match_level = match_level; + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index efae77dd1e35..edf47a4d549e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -245,6 +245,7 @@ struct mlx5_esw_flow_attr { bool vlan_handled; u32 encap_id; u32 mod_hdr_id; + u8 match_level; struct mlx5e_tc_flow_parse_attr *parse_attr; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 35e256eb2f6e..8dd0eca03202 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -91,8 +91,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS; + if (attr->match_level == MLX5_MATCH_NONE) + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + else + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; -- cgit v1.2.3 From 1e7477ae8bbeadfa90064d6bedc421b12bc2d43d Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Wed, 28 Mar 2018 13:26:50 +0300 Subject: net/mlx5e: Report all channels with min RX WQEs timeout Report all channels which got timeout on posting the minimal number of RX WQEs and not only the first one. Avoid busy wait on every channel, when one of the RQs check got timeout, poll once for the remaining RQs. In addition, add channel index to log when failed to get min RX WQEs This info is needed in order to debug in case of dysfunctional channel. 
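A standalone sketch of the wait logic just described, using a hypothetical channel struct and millisecond timing via clock_gettime(): every channel is checked at least once, but only the first failure burns the full timeout; after that the remaining channels are polled with a zero wait and the error is accumulated instead of aborting the loop.

#include <stdio.h>
#include <time.h>

struct channel { int ix; int cur_sz; int min_wqes; };

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int wait_for_min_rx_wqes(struct channel *c, long wait_ms)
{
	long deadline = now_ms() + wait_ms;

	do {
		if (c->cur_sz >= c->min_wqes)
			return 0;
		/* the real driver msleep()s 20ms here; omitted in the sketch */
	} while (now_ms() < deadline);

	fprintf(stderr, "Channel[%d] below min RX WQEs (%d < %d)\n",
		c->ix, c->cur_sz, c->min_wqes);
	return -1;	/* the driver returns -ETIMEDOUT */
}

int main(void)
{
	struct channel chs[] = { { 0, 64, 16 }, { 1, 4, 16 }, { 2, 2, 16 } };
	int err = 0;

	for (int i = 0; i < 3; i++)
		err |= wait_for_min_rx_wqes(&chs[i], err ? 0 : 100);

	return err ? 1 : 0;	/* channels 1 and 2 are both reported */
}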
Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0f2b66b41899..b0d6dde2754f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -747,23 +747,24 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) mlx5_core_destroy_rq(rq->mdev, rq->rqn); } -static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) +static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) { - unsigned long exp_time = jiffies + msecs_to_jiffies(20000); + unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time); struct mlx5e_channel *c = rq->channel; struct mlx5_wq_ll *wq = &rq->wq; u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq)); - while (time_before(jiffies, exp_time)) { + do { if (wq->cur_sz >= min_wqes) return 0; msleep(20); - } + } while (time_before(jiffies, exp_time)); + + netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", + c->ix, rq->rqn, wq->cur_sz, min_wqes); - netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", - rq->rqn, wq->cur_sz, min_wqes); return -ETIMEDOUT; } @@ -2128,13 +2129,11 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs) int err = 0; int i; - for (i = 0; i < chs->num; i++) { - err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq); - if (err) - break; - } + for (i = 0; i < chs->num; i++) + err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, + err ? 0 : 20000); - return err; + return err ? -ETIMEDOUT : 0; } static void mlx5e_deactivate_channels(struct mlx5e_channels *chs) -- cgit v1.2.3 From af5a6c93656eec9a44617c77fae62af54d5a5061 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 23 Jan 2018 12:27:11 +0200 Subject: net/mlx5e: Use __set_bit for adaptive-moderation bit in RQ state Make the code more clear by replacing the existing code with __set_bit. Signed-off-by: Gal Pressman Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b0d6dde2754f..417bf2e8ab85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -820,7 +820,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, goto err_destroy_rq; if (params->rx_dim_enabled) - c->rq.state |= BIT(MLX5E_RQ_STATE_AM); + __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); return 0; -- cgit v1.2.3 From bfbe2057531026ac20bba9f45d3a21a98f3c592a Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 23 Jan 2018 12:28:12 +0200 Subject: net/mlx5e: Use test bit in en accel xmit flow Replace (mask & bit) check with test_bit. 
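Minimal userspace stand-ins for __set_bit()/test_bit() illustrate these bit-helper cleanups; the kernel versions operate on arrays of unsigned long, which is why they read better than open-coded "state & BIT(nr)" once the state word is treated as a bitmap. The helper names and bit numbers below are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static void set_bit_ul(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool test_bit_ul(unsigned int nr, const unsigned long *addr)
{
	return addr[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

enum { RQ_STATE_ENABLED, RQ_STATE_AM };	/* illustrative bit numbers */

int main(void)
{
	unsigned long state = 0;

	set_bit_ul(RQ_STATE_AM, &state);

	if (test_bit_ul(RQ_STATE_AM, &state))
		printf("adaptive moderation enabled\n");
	if (!test_bit_ul(RQ_STATE_ENABLED, &state))
		printf("RQ not enabled yet\n");
	return 0;
}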
Signed-off-by: Gal Pressman Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index 68fcb40a2847..f20074dbef32 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -49,7 +49,7 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, u16 *pi) { #ifdef CONFIG_MLX5_EN_TLS - if (sq->state & BIT(MLX5E_SQ_STATE_TLS)) { + if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) { skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi); if (unlikely(!skb)) return NULL; @@ -57,7 +57,7 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, #endif #ifdef CONFIG_MLX5_EN_IPSEC - if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) { + if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) { skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb); if (unlikely(!skb)) return NULL; -- cgit v1.2.3 From 0e5c04f6b52b248cfdb7e791fd5702f12270df7b Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 23 Jan 2018 12:23:26 +0200 Subject: net/mlx5e: Remove MLX5E_TEST_BIT macro MLX5E_TEST_BIT macro is the same as the already existent test_bit, remove it and replace all usages. Signed-off-by: Gal Pressman Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 -- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 10 +++++----- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 4 ++-- 4 files changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 9cc07da09b70..7c930088e96e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -288,8 +288,6 @@ enum { MLX5E_RQ_STATE_AM, }; -#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr)) - struct mlx5e_cq { /* data path - accessed per cqe */ struct mlx5_cqwq wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index abc6ca8168d2..53f72923b164 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -450,7 +450,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) struct mlx5_wq_ll *wq = &rq->wq; int err; - if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; if (mlx5_wq_ll_is_full(wq)) @@ -508,7 +508,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); struct mlx5_cqe64 *cqe; - if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return; cqe = mlx5_cqwq_get_cqe(&cq->wq); @@ -525,7 +525,7 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) { struct mlx5_wq_ll *wq = &rq->wq; - if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq); @@ -1132,7 +1132,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) struct mlx5_cqe64 
*cqe; int work_done = 0; - if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return 0; if (cq->decmprs_left) @@ -1185,7 +1185,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) sq = container_of(cq, struct mlx5e_xdpsq, cq); - if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return false; cqe = mlx5_cqwq_get_cqe(&cq->wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 047614d2eda2..2d3f17da5f5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -450,7 +450,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) sq = container_of(cq, struct mlx5e_txqsq, cq); - if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return false; cqe = mlx5_cqwq_get_cqe(&cq->wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 900661d45c8a..5d6f9ce2bf80 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -48,7 +48,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) { struct net_dim_sample dim_sample; - if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_AM))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state))) return; net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes, @@ -60,7 +60,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) { struct net_dim_sample dim_sample; - if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_AM))) + if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state))) return; net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes, -- cgit v1.2.3 From 98f3697f8d4129366b5215e47719581db2d54df2 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Mon, 14 May 2018 17:51:21 +0530 Subject: cxgb4: add tc flower match support for tunnel VNI Adds support for matching flows based on tunnel VNI value. Introduces fw APIs for allocating/removing MPS entries related to encapsulation. And uses the same while adding/deleting filters for offloading flows based on tunnel VNI match. Signed-off-by: Kumar Sanghvi Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 9 +++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 91 ++++++++++++++++++++-- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 18 +++++ drivers/net/ethernet/chelsio/cxgb4/l2t.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 86 ++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 4 + drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 34 ++++++++ 7 files changed, 237 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 01e7aad4ce5b..211086bdbe20 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1038,6 +1038,7 @@ struct ch_sched_queue { #define VF_BITWIDTH 8 #define IVLAN_BITWIDTH 16 #define OVLAN_BITWIDTH 16 +#define ENCAP_VNI_BITWIDTH 24 /* Filter matching rules. These consist of a set of ingress packet field * (value, mask) tuples. 
The associated ingress packet field matches the @@ -1068,6 +1069,7 @@ struct ch_filter_tuple { uint32_t ivlan_vld:1; /* inner VLAN valid */ uint32_t ovlan_vld:1; /* outer VLAN valid */ uint32_t pfvf_vld:1; /* PF/VF valid */ + uint32_t encap_vld:1; /* Encapsulation valid */ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */ uint32_t iport:IPORT_BITWIDTH; /* ingress port */ @@ -1078,6 +1080,7 @@ struct ch_filter_tuple { uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */ + uint32_t vni:ENCAP_VNI_BITWIDTH; /* VNI of tunnel */ /* Uncompressed header matching field rules. These are always * available for field rules. @@ -1694,6 +1697,12 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int idx, u8 lookup_type, u8 port_id, bool sleep_ok); +int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, int idx, + bool sleep_ok); +int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int vni, + unsigned int vni_mask, u8 dip_hit, u8 lookup_type, + bool sleep_ok); int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int idx, u8 lookup_type, u8 port_id, bool sleep_ok); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index aae980205ece..ac4d7f75fdc2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -265,6 +265,8 @@ static int validate_filter(struct net_device *dev, fs->mask.pfvf_vld) || unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld, fs->mask.ovlan_vld) || + unsupported(fconf, VNIC_ID_F, fs->val.encap_vld, + fs->mask.encap_vld) || unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld)) return -EOPNOTSUPP; @@ -275,8 +277,12 @@ static int validate_filter(struct net_device *dev, * carries that overlap, we need to translate any PF/VF * specification into that internal format below. */ - if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) && - is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) + if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) && + is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) || + (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) && + is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) || + (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) && + is_field_set(fs->val.encap_vld, fs->mask.encap_vld))) return -EOPNOTSUPP; if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) || (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) && @@ -306,6 +312,9 @@ static int validate_filter(struct net_device *dev, fs->newvlan == VLAN_REWRITE)) return -EOPNOTSUPP; + if (fs->val.encap_vld && + CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) + return -EOPNOTSUPP; return 0; } @@ -705,6 +714,8 @@ int delete_filter(struct adapter *adapter, unsigned int fidx) */ void clear_filter(struct adapter *adap, struct filter_entry *f) { + struct port_info *pi = netdev_priv(f->dev); + /* If the new or old filter have loopback rewriteing rules then we'll * need to free any existing L2T, SMT, CLIP entries of filter * rule. 
@@ -715,6 +726,12 @@ void clear_filter(struct adapter *adap, struct filter_entry *f) if (f->smt) cxgb4_smt_release(f->smt); + if (f->fs.val.encap_vld && f->fs.val.ovlan_vld) + if (atomic_dec_and_test(&adap->mps_encap[f->fs.val.ovlan & + 0x1ff].refcnt)) + t4_free_encap_mac_filt(adap, pi->viid, + f->fs.val.ovlan & 0x1ff, 0); + if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type) cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); @@ -840,6 +857,10 @@ bool is_filter_exact_match(struct adapter *adap, if (!is_hashfilter(adap)) return false; + /* Keep tunnel VNI match disabled for hash-filters for now */ + if (fs->mask.encap_vld) + return false; + if (fs->type) { if (is_inaddr_any(fs->val.fip, AF_INET6) || !is_addr_all_mask(fs->mask.fip, AF_INET6)) @@ -961,8 +982,12 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs, ntuple |= (u64)(fs->val.tos) << tp->tos_shift; if (tp->vnic_shift >= 0) { - if ((adap->params.tp.ingress_config & VNIC_F) && - fs->mask.pfvf_vld) + if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) && + fs->mask.encap_vld) + ntuple |= (u64)((fs->val.encap_vld << 16) | + (fs->val.ovlan)) << tp->vnic_shift; + else if ((adap->params.tp.ingress_config & VNIC_F) && + fs->mask.pfvf_vld) ntuple |= (u64)((fs->val.pfvf_vld << 16) | (fs->val.pf << 13) | (fs->val.vf)) << tp->vnic_shift; @@ -1076,6 +1101,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev, struct filter_ctx *ctx) { struct adapter *adapter = netdev2adap(dev); + struct port_info *pi = netdev_priv(dev); struct tid_info *t = &adapter->tids; struct filter_entry *f; struct sk_buff *skb; @@ -1142,13 +1168,34 @@ static int cxgb4_set_hash_filter(struct net_device *dev, f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf; f->fs.val.ovlan_vld = fs->val.pfvf_vld; f->fs.mask.ovlan_vld = fs->mask.pfvf_vld; + } else if (iconf & USE_ENC_IDX_F) { + if (f->fs.val.encap_vld) { + struct port_info *pi = netdev_priv(f->dev); + u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + + /* allocate MPS TCAM entry */ + ret = t4_alloc_encap_mac_filt(adapter, pi->viid, + match_all_mac, + match_all_mac, + f->fs.val.vni, + f->fs.mask.vni, + 0, 1, 1); + if (ret < 0) + goto free_atid; + + atomic_inc(&adapter->mps_encap[ret].refcnt); + f->fs.val.ovlan = ret; + f->fs.mask.ovlan = 0xffff; + f->fs.val.ovlan_vld = 1; + f->fs.mask.ovlan_vld = 1; + } } size = sizeof(struct cpl_t6_act_open_req); if (f->fs.type) { ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1); if (ret) - goto free_atid; + goto free_mps; skb = alloc_skb(size, GFP_KERNEL); if (!skb) { @@ -1163,7 +1210,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev, skb = alloc_skb(size, GFP_KERNEL); if (!skb) { ret = -ENOMEM; - goto free_atid; + goto free_mps; } mk_act_open_req(f, skb, @@ -1179,6 +1226,10 @@ static int cxgb4_set_hash_filter(struct net_device *dev, free_clip: cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); +free_mps: + if (f->fs.val.encap_vld && f->fs.val.ovlan_vld) + t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1); + free_atid: cxgb4_free_atid(t, atid); @@ -1360,6 +1411,27 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf; f->fs.val.ovlan_vld = fs->val.pfvf_vld; f->fs.mask.ovlan_vld = fs->mask.pfvf_vld; + } else if (iconf & USE_ENC_IDX_F) { + if (f->fs.val.encap_vld) { + struct port_info *pi = netdev_priv(f->dev); + u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + + /* allocate MPS TCAM entry */ + ret = t4_alloc_encap_mac_filt(adapter, pi->viid, + 
match_all_mac, + match_all_mac, + f->fs.val.vni, + f->fs.mask.vni, + 0, 1, 1); + if (ret < 0) + goto free_clip; + + atomic_inc(&adapter->mps_encap[ret].refcnt); + f->fs.val.ovlan = ret; + f->fs.mask.ovlan = 0x1ff; + f->fs.val.ovlan_vld = 1; + f->fs.mask.ovlan_vld = 1; + } } /* Attempt to set the filter. If we don't succeed, we clear @@ -1376,6 +1448,13 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, } return ret; + +free_clip: + if (is_t6(adapter->params.chip) && f->fs.type) + cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); + cxgb4_clear_ftid(&adapter->tids, filter_id, + fs->type ? PF_INET6 : PF_INET, chip_ver); + return ret; } static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 36563364bae7..3ddd2c4acf68 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -194,6 +194,23 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->mask.tos = mask->tos; } + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *key, *mask; + + key = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + cls->key); + mask = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + cls->mask); + fs->val.vni = be32_to_cpu(key->keyid); + fs->mask.vni = be32_to_cpu(mask->keyid); + if (fs->mask.vni) { + fs->val.encap_vld = 1; + fs->mask.encap_vld = 1; + } + } + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_dissector_key_vlan *key, *mask; u16 vlan_tci, vlan_tci_mask; @@ -247,6 +264,7 @@ static int cxgb4_validate_flow_match(struct net_device *dev, BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | BIT(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_IP))) { netdev_warn(dev, "Unsupported key used: 0x%x\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 1817a0307d26..77c2c538b1fd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -491,7 +491,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev, if (tp->protocol_shift >= 0) ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; - if (tp->vnic_shift >= 0) { + if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) { u32 viid = cxgb4_port_viid(dev); u32 vf = FW_VIID_VIN_G(viid); u32 pf = FW_VIID_PFN_G(viid); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 7cb3ef466cc7..df5e7c79223b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7512,6 +7512,43 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } +/** + * t4_free_encap_mac_filt - frees MPS entry at given index + * @adap: the adapter + * @viid: the VI id + * @idx: index of MPS entry to be freed + * @sleep_ok: call is allowed to sleep + * + * Frees the MPS entry at supplied index + * + * Returns a negative error number or zero on success + */ +int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, + int idx, bool sleep_ok) +{ + struct fw_vi_mac_exact *p; + u8 addr[] = {0, 0, 0, 0, 0, 0}; + struct fw_vi_mac_cmd c; + int ret = 
0; + u32 exact; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_CMD_EXEC_V(0) | + FW_VI_MAC_CMD_VIID_V(viid)); + exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC); + c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | + exact | + FW_CMD_LEN16_V(1)); + p = c.u.exact; + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); + return ret; +} + /** * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam * @adap: the adapter @@ -7562,6 +7599,55 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); } +/** + * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support + * @adap: the adapter + * @viid: the VI id + * @mac: the MAC address + * @mask: the mask + * @vni: the VNI id for the tunnel protocol + * @vni_mask: mask for the VNI id + * @dip_hit: to enable DIP match for the MPS entry + * @lookup_type: MAC address for inner (1) or outer (0) header + * @sleep_ok: call is allowed to sleep + * + * Allocates an MPS entry with specified MAC address and VNI value. + * + * Returns a negative error number or the allocated index for this mac. + */ +int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int vni, + unsigned int vni_mask, u8 dip_hit, u8 lookup_type, + bool sleep_ok) +{ + struct fw_vi_mac_cmd c; + struct fw_vi_mac_vni *p = c.u.exact_vni; + int ret = 0; + u32 val; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_VI_MAC_CMD_VIID_V(viid)); + val = FW_CMD_LEN16_V(1) | + FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI); + c.freemacs_to_len16 = cpu_to_be32(val); + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask)); + + p->lookup_type_to_vni = + cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) | + FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) | + FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type)); + p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask)); + ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); + if (ret == 0) + ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); + return ret; +} + /** * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 276fdf214b75..843f8cada1de 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1598,6 +1598,10 @@ #define VNIC_V(x) ((x) << VNIC_S) #define VNIC_F VNIC_V(1U) +#define USE_ENC_IDX_S 13 +#define USE_ENC_IDX_V(x) ((x) << USE_ENC_IDX_S) +#define USE_ENC_IDX_F USE_ENC_IDX_V(1U) + #define CSUM_HAS_PSEUDO_HDR_S 10 #define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S) #define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 0e007ee72318..e6b2e9549d56 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2158,6 +2158,14 @@ struct fw_vi_mac_cmd { 
__be64 data0m_pkd; __be32 data1m[2]; } raw; + struct fw_vi_mac_vni { + __be16 valid_to_idx; + __u8 macaddr[6]; + __be16 r7; + __u8 macaddr_mask[6]; + __be32 lookup_type_to_vni; + __be32 vni_mask_pkd; + } exact_vni[2]; } u; }; @@ -2205,6 +2213,32 @@ struct fw_vi_mac_cmd { #define FW_VI_MAC_CMD_RAW_IDX_G(x) \ (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M) +#define FW_VI_MAC_CMD_LOOKUP_TYPE_S 31 +#define FW_VI_MAC_CMD_LOOKUP_TYPE_M 0x1 +#define FW_VI_MAC_CMD_LOOKUP_TYPE_V(x) ((x) << FW_VI_MAC_CMD_LOOKUP_TYPE_S) +#define FW_VI_MAC_CMD_LOOKUP_TYPE_G(x) \ + (((x) >> FW_VI_MAC_CMD_LOOKUP_TYPE_S) & FW_VI_MAC_CMD_LOOKUP_TYPE_M) +#define FW_VI_MAC_CMD_LOOKUP_TYPE_F FW_VI_MAC_CMD_LOOKUP_TYPE_V(1U) + +#define FW_VI_MAC_CMD_DIP_HIT_S 30 +#define FW_VI_MAC_CMD_DIP_HIT_M 0x1 +#define FW_VI_MAC_CMD_DIP_HIT_V(x) ((x) << FW_VI_MAC_CMD_DIP_HIT_S) +#define FW_VI_MAC_CMD_DIP_HIT_G(x) \ + (((x) >> FW_VI_MAC_CMD_DIP_HIT_S) & FW_VI_MAC_CMD_DIP_HIT_M) +#define FW_VI_MAC_CMD_DIP_HIT_F FW_VI_MAC_CMD_DIP_HIT_V(1U) + +#define FW_VI_MAC_CMD_VNI_S 0 +#define FW_VI_MAC_CMD_VNI_M 0xffffff +#define FW_VI_MAC_CMD_VNI_V(x) ((x) << FW_VI_MAC_CMD_VNI_S) +#define FW_VI_MAC_CMD_VNI_G(x) \ + (((x) >> FW_VI_MAC_CMD_VNI_S) & FW_VI_MAC_CMD_VNI_M) + +#define FW_VI_MAC_CMD_VNI_MASK_S 0 +#define FW_VI_MAC_CMD_VNI_MASK_M 0xffffff +#define FW_VI_MAC_CMD_VNI_MASK_V(x) ((x) << FW_VI_MAC_CMD_VNI_MASK_S) +#define FW_VI_MAC_CMD_VNI_MASK_G(x) \ + (((x) >> FW_VI_MAC_CMD_VNI_MASK_S) & FW_VI_MAC_CMD_VNI_MASK_M) + #define FW_RXMODE_MTU_NO_CHG 65535 struct fw_vi_rxmode_cmd { -- cgit v1.2.3 From 1204aa17f3b4f63e67ac9b7c9afa9496485969c5 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Thu, 10 May 2018 15:21:39 +0200 Subject: brcmfmac: set WIPHY_FLAG_HAVE_AP_SME flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit brcmfmac is a FullMAC driver and it implements/uses cfg80211 interface for stations management. At the same time it doesn't receive or pass up management frames. This flag indicates that authenticator doesn't have to subscribe to or handle management frames. Some authenticators (e.g. hostapd) were working with brcmfmac thanks to some extra assumptions. This commit clears up the situation. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 79a124d1aa23..84135dac49b3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6518,6 +6518,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->flags |= WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_PS_ON_BY_DEFAULT | + WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS)) -- cgit v1.2.3 From 54b5172087aeae61150835c91e68f084a9644f1c Mon Sep 17 00:00:00 2001 From: Sanjay Kumar Konduri Date: Fri, 11 May 2018 20:27:51 +0530 Subject: rsi: Add null check for virtual interfaces in wowlan config When the "poweroff" command is executed after wowlan enabled, we have observed a system crash. 
In the system "poweroff" sequence, network-manager is brought to the inactive state by cleaning up the network interfaces via rsi_mac80211_remove_interface(). When the driver then tries to access those network interfaces in rsi_wowlan_config(), which is invoked from the SDIO shutdown path, the result is a crash. Add a NULL check before accessing the network interfaces in rsi_wowlan_config(). Signed-off-by: Sanjay Kumar Konduri Signed-off-by: Siva Rebbagondla Signed-off-by: Sushant Kumar Mishra Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 80e7f4f4f188..743706eeffa2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1804,10 +1804,15 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan) struct rsi_common *common = adapter->priv; u16 triggers = 0; u16 rx_filter_word = 0; - struct ieee80211_bss_conf *bss = &adapter->vifs[0]->bss_conf; + struct ieee80211_bss_conf *bss = NULL; rsi_dbg(INFO_ZONE, "Config WoWLAN to device\n"); + if (!adapter->vifs[0]) + return -EINVAL; + + bss = &adapter->vifs[0]->bss_conf; + if (WARN_ON(!wowlan)) { rsi_dbg(ERR_ZONE, "WoW triggers not enabled\n"); return -EINVAL; -- cgit v1.2.3 From d76f8513fa9050b6fff477beb453828e5024ef4b Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Fri, 11 May 2018 20:27:52 +0530 Subject: rsi: reset hibernate_resume flag to work hibernate resume in coex mode. In coex mode, hibernate resume was observed not to work properly because the hibernate_resume flag is not reset in rsi_coex_recv_pkt() when the common card ready indication is received from firmware. Hence, reset the hibernate_resume flag in this function. Signed-off-by: Siva Rebbagondla Signed-off-by: Sushant Kumar Mishra Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_coex.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c index d055099dadf1..c8ba148f8c6c 100644 --- a/drivers/net/wireless/rsi/rsi_91x_coex.c +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -73,6 +73,7 @@ int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg) switch (msg_type) { case COMMON_CARD_READY_IND: rsi_dbg(INFO_ZONE, "common card ready received\n"); + common->hibernate_resume = false; rsi_handle_card_ready(common, msg); break; case SLEEP_NOTIFY_IND: -- cgit v1.2.3 From 1d18c5584c05bb059a0ce2fca5bf3c523d6fa4d2 Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Fri, 11 May 2018 20:27:53 +0530 Subject: rsi: Set wowlan flag while writing wowlan config parameters Because the wowlan enable flag was not set before the wowlan parameters were written to the card via rsi_send_vap_dynamic_update(), the firmware was unable to apply the wowlan configuration. Hence, set the wowlan flag before sending the parameters.
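As an illustration of the ordering issue this patch addresses, here is a minimal standalone C sketch. It is not the rsi driver, and every name in it (dev_state, push_config_to_fw, config_wowlan, WOW_ENABLED) is hypothetical; the point is simply that the configuration push only honours the triggers if the enable flag is already visible at the time of the write, so the flag must be raised before the parameters are sent, not after.

#include <stdio.h>

#define WOW_ENABLED 0x1u

struct dev_state {
	unsigned int flags;      /* driver-side feature flags */
	unsigned int triggers;   /* wake-up triggers to program */
};

/* Stand-in for the firmware write: it only arms WoWLAN when the enable
 * flag is already set at the moment the configuration is pushed. */
static void push_config_to_fw(const struct dev_state *s)
{
	if (s->flags & WOW_ENABLED)
		printf("fw: WoWLAN armed, triggers=0x%x\n", s->triggers);
	else
		printf("fw: WoWLAN not armed, triggers ignored\n");
}

static void config_wowlan(struct dev_state *s, unsigned int triggers)
{
	s->flags |= WOW_ENABLED;   /* raise the flag first ... */
	s->triggers = triggers;
	push_config_to_fw(s);      /* ... so the push sees it */
}

int main(void)
{
	struct dev_state s = { 0, 0 };

	config_wowlan(&s, 0x3);
	return 0;
}

Built standalone, the sketch reports the WoWLAN configuration as armed only because the flag is set before the push; reversing the two statements in config_wowlan() reproduces the failure mode described above.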
Signed-off-by: Siva Rebbagondla Signed-off-by: Sushant Kumar Mishra Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 743706eeffa2..3faa0449a5ef 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1818,6 +1818,7 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan) return -EINVAL; } + common->wow_flags |= RSI_WOW_ENABLED; triggers = rsi_wow_map_triggers(common, wowlan); if (!triggers) { rsi_dbg(ERR_ZONE, "%s:No valid WoW triggers\n", __func__); @@ -1840,7 +1841,6 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan) rx_filter_word = (ALLOW_DATA_ASSOC_PEER | DISALLOW_BEACONS); rsi_send_rx_filter_frame(common, rx_filter_word); - common->wow_flags |= RSI_WOW_ENABLED; return 0; } -- cgit v1.2.3 From 66cffd6daab76caebab26eb803b92182414fc182 Mon Sep 17 00:00:00 2001 From: Taketo Kabe Date: Sun, 13 May 2018 18:16:40 +0900 Subject: b43: fix transmit failure when VT is switched Setup: Using a BCM4306 rev.03 chip based CardBus wireless card. The IRQ is shared with the yenta (CardBus bridge) and i915 (display) drivers. For firmware, the latest (but dated) openfwwf 5.2 is installed (http://netweb.ing.unibs.it/~openfwwf/) How-to-reproduce: Do "ssh ", then "ls -lR /" to generate traffic, then repeatedly switch VTs with Alt-F1<>Alt-F2. Eventually (within a minute) the card stops working: traffic can still be received, but nothing is transmitted. For an unknown reason it doesn't occur when just generating traffic with "ssh ls -lR /". With CONFIG_B43_DEBUG=y kernel config, when it stops, the debug message shows kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 148, but got 180 The slot offset I observed so far was always 32. With the err_out2 once-only guard removed so that successive error messages are printed, the debug output looks like this: kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 116, but got 148 kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 116, but got 150 kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 116, but got 120 kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 116, but got 152 kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 116, but got 122 kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 116, but got 154 kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 116, but got 124 kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 116, but got 156 kernel: b43-phy1 debug: Out of order TX status report on DMA ring 1. Expected 116, but got 126 The TX ring alternates between 2 sequences; the ring seems to be completely confused. A controller restart is needed. Workaround(1): The problem doesn't occur when using the proprietary firmware extracted with b43-fwcutter, so it may be a bug in the openfwwf firmware, as the comment in b43_dma_handle_txstatus() suggests. I wasn't able to find a bug in the terse openfwwf code though. Workaround(2): Using the "pio=1" option to avoid DMA makes this problem not occur. Description of the patch: This patch forcibly resets the controller to make it work again. Very kludgy and doesn't look right, but the traffic will continue to flow.
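The recovery policy adopted here can be summarised with a small standalone C sketch. The names are hypothetical and the logic is simplified (it leaves out the tolerated single header/data-pair skip that the real b43_dma_handle_txstatus() still accepts): when a TX status report does not point at the oldest in-flight slot, log the mismatch and restart the controller rather than trying to resynchronise the ring.

#include <stdio.h>

#define RING_SLOTS 256

struct tx_ring {
	int index;        /* ring number, for the log message */
	int first_used;   /* oldest slot still awaiting a TX status */
};

static void controller_restart(const char *reason)
{
	printf("controller restart: %s\n", reason);
}

static void handle_tx_status(struct tx_ring *ring, int slot)
{
	if (slot == ring->first_used) {
		/* In-order report: retire the oldest slot. */
		ring->first_used = (ring->first_used + 1) % RING_SLOTS;
		return;
	}

	/* The report points elsewhere in the ring: log it and reset the
	 * controller instead of attempting to resynchronise. */
	printf("Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
	       ring->index, ring->first_used, slot);
	controller_restart("Out of order TX");
}

int main(void)
{
	struct tx_ring ring = { 1, 116 };

	handle_tx_status(&ring, 116);   /* normal completion */
	handle_tx_status(&ring, 148);   /* mismatch, triggers a restart */
	return 0;
}

The design choice this illustrates is that, once the ring state and the hardware's status reports disagree by more than the known benign case, a full controller restart is cheaper and safer than guessing which slots were actually completed.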
Signed-off-by: Taketo Kabe Reviewed-by: Michael Buesch Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/b43/dma.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index 6837064908be..6b0e1ec346cb 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -1484,7 +1484,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, int slot, firstused; bool frame_succeed; int skip; - static u8 err_out1, err_out2; + static u8 err_out1; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) @@ -1518,13 +1518,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, } } else { /* More than a single header/data pair were missed. - * Report this error once. + * Report this error, and reset the controller to + * revive operation. */ - if (!err_out2) - b43dbg(dev->wl, - "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", - ring->index, firstused, slot); - err_out2 = 1; + b43dbg(dev->wl, + "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", + ring->index, firstused, slot); + b43_controller_restart(dev, "Out of order TX"); return; } } -- cgit v1.2.3 From 763ece85f45a6b93268e25a0abf02922f911dab4 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Tue, 15 May 2018 11:14:44 +0200 Subject: brcmfmac: fix initialization of struct cfg80211_inform_bss variable This patch fixes a sparse warning "Using plain integer as NULL pointer" about cfg80211_inform_bss structure initialization. Reported-by: kbuild test robot Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 84135dac49b3..f5b405c98047 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2737,7 +2737,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, u16 notify_interval; u8 *notify_ie; size_t notify_ielen; - struct cfg80211_inform_bss bss_data = { 0 }; + struct cfg80211_inform_bss bss_data = {}; if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) { brcmf_err("Bss info is larger than buffer. Discarding\n"); -- cgit v1.2.3 From 542671fe4d86ad42b132d8814b6847ee1414aba6 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 14 May 2018 22:04:55 +0200 Subject: net: phy: mscc-miim: Add MDIO driver Add a driver for the Microsemi MII Management controller (MIIM) found on Microsemi SoCs. On Ocelot, there are two controllers, one is connected to the internal PHYs, the other one can communicate with external PHYs. Reviewed-by: Andrew Lunn Signed-off-by: Alexandre Belloni Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/Kconfig | 7 ++ drivers/net/phy/Makefile | 1 + drivers/net/phy/mdio-mscc-miim.c | 197 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 205 insertions(+) create mode 100644 drivers/net/phy/mdio-mscc-miim.c (limited to 'drivers/net') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 19a5778a7ec7..0e2305ccc91f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -123,6 +123,13 @@ config MDIO_MOXART This driver supports the MDIO interface found in the network interface units of the MOXA ART SoC +config MDIO_MSCC_MIIM + tristate "Microsemi MIIM interface support" + depends on HAS_IOMEM + help + This driver supports the MIIM (MDIO) interface found in the network + switches of the Microsemi SoCs + config MDIO_OCTEON tristate "Octeon and some ThunderX SOCs MDIO buses" depends on 64BIT diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 738246960e3b..5805c0b7d60e 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o +obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o diff --git a/drivers/net/phy/mdio-mscc-miim.c b/drivers/net/phy/mdio-mscc-miim.c new file mode 100644 index 000000000000..8c689ccfdbca --- /dev/null +++ b/drivers/net/phy/mdio-mscc-miim.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Driver for the MDIO interface of Microsemi network switches. + * + * Author: Alexandre Belloni + * Copyright (c) 2017 Microsemi Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define MSCC_MIIM_REG_STATUS 0x0 +#define MSCC_MIIM_STATUS_STAT_BUSY BIT(3) +#define MSCC_MIIM_REG_CMD 0x8 +#define MSCC_MIIM_CMD_OPR_WRITE BIT(1) +#define MSCC_MIIM_CMD_OPR_READ BIT(2) +#define MSCC_MIIM_CMD_WRDATA_SHIFT 4 +#define MSCC_MIIM_CMD_REGAD_SHIFT 20 +#define MSCC_MIIM_CMD_PHYAD_SHIFT 25 +#define MSCC_MIIM_CMD_VLD BIT(31) +#define MSCC_MIIM_REG_DATA 0xC +#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17)) + +#define MSCC_PHY_REG_PHY_CFG 0x0 +#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3)) +#define PHY_CFG_PHY_COMMON_RESET BIT(4) +#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8)) +#define MSCC_PHY_REG_PHY_STATUS 0x4 + +struct mscc_miim_dev { + void __iomem *regs; + void __iomem *phy_regs; +}; + +static int mscc_miim_wait_ready(struct mii_bus *bus) +{ + struct mscc_miim_dev *miim = bus->priv; + u32 val; + + readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val, + !(val & MSCC_MIIM_STATUS_STAT_BUSY), 100, 250000); + if (val & MSCC_MIIM_STATUS_STAT_BUSY) + return -ETIMEDOUT; + + return 0; +} + +static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct mscc_miim_dev *miim = bus->priv; + u32 val; + int ret; + + ret = mscc_miim_wait_ready(bus); + if (ret) + goto out; + + writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | + (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ, + miim->regs + MSCC_MIIM_REG_CMD); + + ret = mscc_miim_wait_ready(bus); + if (ret) + goto out; + + val = readl(miim->regs + MSCC_MIIM_REG_DATA); + if (val & MSCC_MIIM_DATA_ERROR) { + ret = -EIO; + goto out; + } + + ret = val & 0xFFFF; +out: + return ret; +} + +static int mscc_miim_write(struct 
mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + struct mscc_miim_dev *miim = bus->priv; + int ret; + + ret = mscc_miim_wait_ready(bus); + if (ret < 0) + goto out; + + writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | + (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | + (value << MSCC_MIIM_CMD_WRDATA_SHIFT) | + MSCC_MIIM_CMD_OPR_WRITE, + miim->regs + MSCC_MIIM_REG_CMD); + +out: + return ret; +} + +static int mscc_miim_reset(struct mii_bus *bus) +{ + struct mscc_miim_dev *miim = bus->priv; + + if (miim->phy_regs) { + writel(0, miim->phy_regs + MSCC_PHY_REG_PHY_CFG); + writel(0x1ff, miim->phy_regs + MSCC_PHY_REG_PHY_CFG); + mdelay(500); + } + + return 0; +} + +static int mscc_miim_probe(struct platform_device *pdev) +{ + struct resource *res; + struct mii_bus *bus; + struct mscc_miim_dev *dev; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev)); + if (!bus) + return -ENOMEM; + + bus->name = "mscc_miim"; + bus->read = mscc_miim_read; + bus->write = mscc_miim_write; + bus->reset = mscc_miim_reset; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); + bus->parent = &pdev->dev; + + dev = bus->priv; + dev->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dev->regs)) { + dev_err(&pdev->dev, "Unable to map MIIM registers\n"); + return PTR_ERR(dev->regs); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + dev->phy_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dev->phy_regs)) { + dev_err(&pdev->dev, "Unable to map internal phy registers\n"); + return PTR_ERR(dev->phy_regs); + } + } + + if (pdev->dev.of_node) + ret = of_mdiobus_register(bus, pdev->dev.of_node); + else + ret = mdiobus_register(bus); + + if (ret < 0) { + dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); + return ret; + } + + platform_set_drvdata(pdev, bus); + + return 0; +} + +static int mscc_miim_remove(struct platform_device *pdev) +{ + struct mii_bus *bus = platform_get_drvdata(pdev); + + mdiobus_unregister(bus); + + return 0; +} + +static const struct of_device_id mscc_miim_match[] = { + { .compatible = "mscc,ocelot-miim" }, + { } +}; +MODULE_DEVICE_TABLE(of, mscc_miim_match); + +static struct platform_driver mscc_miim_driver = { + .probe = mscc_miim_probe, + .remove = mscc_miim_remove, + .driver = { + .name = "mscc-miim", + .of_match_table = mscc_miim_match, + }, +}; + +module_platform_driver(mscc_miim_driver); + +MODULE_DESCRIPTION("Microsemi MIIM driver"); +MODULE_AUTHOR("Alexandre Belloni "); +MODULE_LICENSE("Dual MIT/GPL"); -- cgit v1.2.3 From a556c76adc052c979ef9e80f0cd3fa1379ff4943 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 14 May 2018 22:04:57 +0200 Subject: net: mscc: Add initial Ocelot switch support Add a driver for Microsemi Ocelot Ethernet switch support. This makes two modules: mscc_ocelot_common handles all the common features that doesn't depend on how the switch is integrated in the SoC. Currently, it handles offloading bridging to the hardware. ocelot_io.c handles register accesses. This is unfortunately needed because the register layout is packed and then depends on the number of ports available on the switch. The register definition files are automatically generated. ocelot_board handles the switch integration on the SoC and on the board. Frame injection and extraction to/from the CPU port is currently done using register accesses which is quite slow. 
DMA is possible but the port is not able to absorb the whole switch bandwidth. Signed-off-by: Alexandre Belloni Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/mscc/Kconfig | 30 + drivers/net/ethernet/mscc/Makefile | 5 + drivers/net/ethernet/mscc/ocelot.c | 1333 +++++++++++++++++++++++++++ drivers/net/ethernet/mscc/ocelot.h | 572 ++++++++++++ drivers/net/ethernet/mscc/ocelot_ana.h | 625 +++++++++++++ drivers/net/ethernet/mscc/ocelot_board.c | 316 +++++++ drivers/net/ethernet/mscc/ocelot_dev.h | 275 ++++++ drivers/net/ethernet/mscc/ocelot_dev_gmii.h | 154 ++++ drivers/net/ethernet/mscc/ocelot_hsio.h | 785 ++++++++++++++++ drivers/net/ethernet/mscc/ocelot_io.c | 116 +++ drivers/net/ethernet/mscc/ocelot_qs.h | 78 ++ drivers/net/ethernet/mscc/ocelot_qsys.h | 270 ++++++ drivers/net/ethernet/mscc/ocelot_regs.c | 497 ++++++++++ drivers/net/ethernet/mscc/ocelot_rew.h | 81 ++ drivers/net/ethernet/mscc/ocelot_sys.h | 144 +++ 17 files changed, 5283 insertions(+) create mode 100644 drivers/net/ethernet/mscc/Kconfig create mode 100644 drivers/net/ethernet/mscc/Makefile create mode 100644 drivers/net/ethernet/mscc/ocelot.c create mode 100644 drivers/net/ethernet/mscc/ocelot.h create mode 100644 drivers/net/ethernet/mscc/ocelot_ana.h create mode 100644 drivers/net/ethernet/mscc/ocelot_board.c create mode 100644 drivers/net/ethernet/mscc/ocelot_dev.h create mode 100644 drivers/net/ethernet/mscc/ocelot_dev_gmii.h create mode 100644 drivers/net/ethernet/mscc/ocelot_hsio.h create mode 100644 drivers/net/ethernet/mscc/ocelot_io.c create mode 100644 drivers/net/ethernet/mscc/ocelot_qs.h create mode 100644 drivers/net/ethernet/mscc/ocelot_qsys.h create mode 100644 drivers/net/ethernet/mscc/ocelot_regs.c create mode 100644 drivers/net/ethernet/mscc/ocelot_rew.h create mode 100644 drivers/net/ethernet/mscc/ocelot_sys.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 603a5704dab8..54d71e1c48d5 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -114,6 +114,7 @@ source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" +source "drivers/net/ethernet/mscc/Kconfig" source "drivers/net/ethernet/moxa/Kconfig" source "drivers/net/ethernet/myricom/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 2bfd2eea50bf..8fbfe9ce2fa5 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -55,6 +55,7 @@ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ +obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig new file mode 100644 index 000000000000..36c84625d54e --- /dev/null +++ b/drivers/net/ethernet/mscc/Kconfig @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: (GPL-2.0 OR MIT) +config NET_VENDOR_MICROSEMI + bool "Microsemi devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Microsemi devices. + +if NET_VENDOR_MICROSEMI + +config MSCC_OCELOT_SWITCH + tristate "Ocelot switch driver" + depends on NET_SWITCHDEV + depends on HAS_IOMEM + select PHYLIB + select REGMAP_MMIO + help + This driver supports the Ocelot network switch device. + +config MSCC_OCELOT_SWITCH_OCELOT + tristate "Ocelot switch driver on Ocelot" + depends on MSCC_OCELOT_SWITCH + help + This driver supports the Ocelot network switch device as present on + the Ocelot SoCs. + +endif # NET_VENDOR_MICROSEMI diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile new file mode 100644 index 000000000000..cb52a3b128ae --- /dev/null +++ b/drivers/net/ethernet/mscc/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: (GPL-2.0 OR MIT) +obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o +mscc_ocelot_common-y := ocelot.o ocelot_io.o +mscc_ocelot_common-y += ocelot_regs.o +obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c new file mode 100644 index 000000000000..c8c74aa548d9 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -0,0 +1,1333 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ocelot.h" + +/* MAC table entry types. + * ENTRYTYPE_NORMAL is subject to aging. + * ENTRYTYPE_LOCKED is not subject to aging. + * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast. + * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast. + */ +enum macaccess_entry_type { + ENTRYTYPE_NORMAL = 0, + ENTRYTYPE_LOCKED, + ENTRYTYPE_MACv4, + ENTRYTYPE_MACv6, +}; + +struct ocelot_mact_entry { + u8 mac[ETH_ALEN]; + u16 vid; + enum macaccess_entry_type type; +}; + +static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) +{ + unsigned int val, timeout = 10; + + /* Wait for the issued mac table command to be completed, or timeout. + * When the command read from ANA_TABLES_MACACCESS is + * MACACCESS_CMD_IDLE, the issued command completed successfully. + */ + do { + val = ocelot_read(ocelot, ANA_TABLES_MACACCESS); + val &= ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M; + } while (val != MACACCESS_CMD_IDLE && timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static void ocelot_mact_select(struct ocelot *ocelot, + const unsigned char mac[ETH_ALEN], + unsigned int vid) +{ + u32 macl = 0, mach = 0; + + /* Set the MAC address to handle and the vlan associated in a format + * understood by the hardware. 
+ */ + mach |= vid << 16; + mach |= mac[0] << 8; + mach |= mac[1] << 0; + macl |= mac[2] << 24; + macl |= mac[3] << 16; + macl |= mac[4] << 8; + macl |= mac[5] << 0; + + ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA); + ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA); + +} + +static int ocelot_mact_learn(struct ocelot *ocelot, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + ocelot_mact_select(ocelot, mac, vid); + + /* Issue a write command */ + ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID | + ANA_TABLES_MACACCESS_DEST_IDX(port) | + ANA_TABLES_MACACCESS_ENTRYTYPE(type) | + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN), + ANA_TABLES_MACACCESS); + + return ocelot_mact_wait_for_completion(ocelot); +} + +static int ocelot_mact_forget(struct ocelot *ocelot, + const unsigned char mac[ETH_ALEN], + unsigned int vid) +{ + ocelot_mact_select(ocelot, mac, vid); + + /* Issue a forget command */ + ocelot_write(ocelot, + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET), + ANA_TABLES_MACACCESS); + + return ocelot_mact_wait_for_completion(ocelot); +} + +static void ocelot_mact_init(struct ocelot *ocelot) +{ + /* Configure the learning mode entries attributes: + * - Do not copy the frame to the CPU extraction queues. + * - Use the vlan and mac_cpoy for dmac lookup. + */ + ocelot_rmw(ocelot, 0, + ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS + | ANA_AGENCTRL_LEARN_FWD_KILL + | ANA_AGENCTRL_LEARN_IGNORE_VLAN, + ANA_AGENCTRL); + + /* Clear the MAC table */ + ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS); +} + +static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) +{ + unsigned int val, timeout = 10; + + /* Wait for the issued mac table command to be completed, or timeout. + * When the command read from ANA_TABLES_MACACCESS is + * MACACCESS_CMD_IDLE, the issued command completed successfully. 
+ */ + do { + val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS); + val &= ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M; + } while (val != ANA_TABLES_VLANACCESS_CMD_IDLE && timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static void ocelot_vlan_init(struct ocelot *ocelot) +{ + /* Clear VLAN table, by default all ports are members of all VLANs */ + ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT, + ANA_TABLES_VLANACCESS); + ocelot_vlant_wait_for_completion(ocelot); +} + +/* Watermark encode + * Bit 8: Unit; 0:1, 1:16 + * Bit 7-0: Value to be multiplied with unit + */ +static u16 ocelot_wm_enc(u16 value) +{ + if (value >= BIT(8)) + return BIT(8) | (value / 16); + + return value; +} + +static void ocelot_port_adjust_link(struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + u8 p = port->chip_port; + int speed, atop_wm, mode = 0; + + switch (dev->phydev->speed) { + case SPEED_10: + speed = OCELOT_SPEED_10; + break; + case SPEED_100: + speed = OCELOT_SPEED_100; + break; + case SPEED_1000: + speed = OCELOT_SPEED_1000; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + break; + case SPEED_2500: + speed = OCELOT_SPEED_2500; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + break; + default: + netdev_err(dev, "Unsupported PHY speed: %d\n", + dev->phydev->speed); + return; + } + + phy_print_status(dev->phydev); + + if (!dev->phydev->link) + return; + + /* Only full duplex supported for now */ + ocelot_port_writel(port, DEV_MAC_MODE_CFG_FDX_ENA | + mode, DEV_MAC_MODE_CFG); + + /* Set MAC IFG Gaps + * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0 + * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5 + */ + ocelot_port_writel(port, DEV_MAC_IFG_CFG_TX_IFG(5), DEV_MAC_IFG_CFG); + + /* Load seed (0) and set MAC HDX late collision */ + ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) | + DEV_MAC_HDX_CFG_SEED_LOAD, + DEV_MAC_HDX_CFG); + mdelay(1); + ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67), + DEV_MAC_HDX_CFG); + + /* Disable HDX fast control */ + ocelot_port_writel(port, DEV_PORT_MISC_HDX_FAST_DIS, DEV_PORT_MISC); + + /* SGMII only for now */ + ocelot_port_writel(port, PCS1G_MODE_CFG_SGMII_MODE_ENA, PCS1G_MODE_CFG); + ocelot_port_writel(port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + + /* Enable PCS */ + ocelot_port_writel(port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + + /* No aneg on SGMII */ + ocelot_port_writel(port, 0, PCS1G_ANEG_CFG); + + /* No loopback */ + ocelot_port_writel(port, 0, PCS1G_LB_CFG); + + /* Set Max Length and maximum tags allowed */ + ocelot_port_writel(port, VLAN_ETH_FRAME_LEN, DEV_MAC_MAXLEN_CFG); + ocelot_port_writel(port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) | + DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, + DEV_MAC_TAGS_CFG); + + /* Enable MAC module */ + ocelot_port_writel(port, DEV_MAC_ENA_CFG_RX_ENA | + DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); + + /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of + * reset */ + ocelot_port_writel(port, DEV_CLOCK_CFG_LINK_SPEED(speed), + DEV_CLOCK_CFG); + + /* Set SMAC of Pause frame (00:00:00:00:00:00) */ + ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_HIGH_CFG); + ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_LOW_CFG); + + /* No PFC */ + ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed), + ANA_PFC_PFC_CFG, p); + + /* Set Pause WM hysteresis + * 152 = 6 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ + * 101 = 4 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ + */ + ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA | + 
SYS_PAUSE_CFG_PAUSE_STOP(101) | + SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, p); + + /* Core: Enable port for frame transfer */ + ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | + QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | + QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE, p); + + /* Flow control */ + ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | + SYS_MAC_FC_CFG_RX_FC_ENA | SYS_MAC_FC_CFG_TX_FC_ENA | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA | + SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | + SYS_MAC_FC_CFG_FC_LINK_SPEED(speed), + SYS_MAC_FC_CFG, p); + ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, p); + + /* Tail dropping watermark */ + atop_wm = (ocelot->shared_queue_sz - 9 * VLAN_ETH_FRAME_LEN) / OCELOT_BUFFER_CELL_SZ; + ocelot_write_rix(ocelot, ocelot_wm_enc(9 * VLAN_ETH_FRAME_LEN), + SYS_ATOP, p); + ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG); +} + +static int ocelot_port_open(struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int err; + + /* Enable receiving frames on the port, and activate auto-learning of + * MAC addresses. + */ + ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | + ANA_PORT_PORT_CFG_RECV_ENA | + ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port), + ANA_PORT_PORT_CFG, port->chip_port); + + err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link, + PHY_INTERFACE_MODE_NA); + if (err) { + netdev_err(dev, "Could not attach to PHY\n"); + return err; + } + + dev->phydev = port->phy; + + phy_attached_info(port->phy); + phy_start(port->phy); + return 0; +} + +static int ocelot_port_stop(struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + + phy_disconnect(port->phy); + + dev->phydev = NULL; + + ocelot_port_writel(port, 0, DEV_MAC_ENA_CFG); + ocelot_rmw_rix(port->ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE, port->chip_port); + return 0; +} + +/* Generate the IFH for frame injection + * + * The IFH is a 128bit-value + * bit 127: bypass the analyzer processing + * bit 56-67: destination mask + * bit 28-29: pop_cnt: 3 disables all rewriting of the frame + * bit 20-27: cpu extraction queue mask + * bit 16: tag type 0: C-tag, 1: S-tag + * bit 0-11: VID + */ +static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) +{ + ifh[0] = IFH_INJ_BYPASS; + ifh[1] = (0xff00 & info->port) >> 8; + ifh[2] = (0xff & info->port) << 24; + ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) | + (info->tag_type << 16) | info->vid; + + return 0; +} + +static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + u32 val, ifh[IFH_LEN]; + struct frame_info info = {}; + u8 grp = 0; /* Send everything on CPU group 0 */ + unsigned int i, count, last; + + val = ocelot_read(ocelot, QS_INJ_STATUS); + if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) || + (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))) + return NETDEV_TX_BUSY; + + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); + + info.port = BIT(port->chip_port); + info.cpuq = 0xff; + ocelot_gen_ifh(ifh, &info); + + for (i = 0; i < IFH_LEN; i++) + ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); + + count = (skb->len + 3) / 4; + last = skb->len % 4; + for (i = 0; i < count; i++) { + ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); + } + + /* Add padding */ + while (i < (OCELOT_BUFFER_CELL_SZ / 4)) { + ocelot_write_rix(ocelot, 0, 
QS_INJ_WR, grp); + i++; + } + + /* Indicate EOF and valid bytes in last word */ + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) | + QS_INJ_CTRL_EOF, + QS_INJ_CTRL, grp); + + /* Add dummy CRC */ + ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); + skb_tx_timestamp(skb); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static void ocelot_mact_mc_reset(struct ocelot_port *port) +{ + struct ocelot *ocelot = port->ocelot; + struct netdev_hw_addr *ha, *n; + + /* Free and forget all the MAC addresses stored in the port private mc + * list. These are mc addresses that were previously added by calling + * ocelot_mact_mc_add(). + */ + list_for_each_entry_safe(ha, n, &port->mc, list) { + ocelot_mact_forget(ocelot, ha->addr, port->pvid); + list_del(&ha->list); + kfree(ha); + } +} + +static int ocelot_mact_mc_add(struct ocelot_port *port, + struct netdev_hw_addr *hw_addr) +{ + struct ocelot *ocelot = port->ocelot; + struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL); + + if (!ha) + return -ENOMEM; + + memcpy(ha, hw_addr, sizeof(*ha)); + list_add_tail(&ha->list, &port->mc); + + ocelot_mact_learn(ocelot, PGID_CPU, ha->addr, port->pvid, + ENTRYTYPE_LOCKED); + + return 0; +} + +static void ocelot_set_rx_mode(struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + struct netdev_hw_addr *ha; + int i; + u32 val; + + /* This doesn't handle promiscuous mode because the bridge core is + * setting IFF_PROMISC on all slave interfaces and all frames would be + * forwarded to the CPU port. + */ + val = GENMASK(ocelot->num_phys_ports - 1, 0); + for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) + ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); + + /* Handle the device multicast addresses. First remove all the + * previously installed addresses and then add the latest ones to the + * mac table. + */ + ocelot_mact_mc_reset(port); + netdev_for_each_mc_addr(ha, dev) + ocelot_mact_mc_add(port, ha); +} + +static int ocelot_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct ocelot_port *port = netdev_priv(dev); + int ret; + + ret = snprintf(buf, len, "p%d", port->chip_port); + if (ret >= len) + return -EINVAL; + + return 0; +} + +static int ocelot_port_set_mac_address(struct net_device *dev, void *p) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + const struct sockaddr *addr = p; + + /* Learn the new net device MAC address in the mac table. */ + ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, port->pvid, + ENTRYTYPE_LOCKED); + /* Then forget the previous one. 
*/ + ocelot_mact_forget(ocelot, dev->dev_addr, port->pvid); + + ether_addr_copy(dev->dev_addr, addr->sa_data); + return 0; +} + +static void ocelot_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + + /* Configure the port to read the stats from */ + ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port->chip_port), + SYS_STAT_CFG); + + /* Get Rx stats */ + stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS); + stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) + + ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) + + ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) + + ocelot_read(ocelot, SYS_COUNT_RX_LONGS) + + ocelot_read(ocelot, SYS_COUNT_RX_64) + + ocelot_read(ocelot, SYS_COUNT_RX_65_127) + + ocelot_read(ocelot, SYS_COUNT_RX_128_255) + + ocelot_read(ocelot, SYS_COUNT_RX_256_1023) + + ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) + + ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX); + stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST); + stats->rx_dropped = dev->stats.rx_dropped; + + /* Get Tx stats */ + stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS); + stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) + + ocelot_read(ocelot, SYS_COUNT_TX_65_127) + + ocelot_read(ocelot, SYS_COUNT_TX_128_511) + + ocelot_read(ocelot, SYS_COUNT_TX_512_1023) + + ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) + + ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX); + stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) + + ocelot_read(ocelot, SYS_COUNT_TX_AGING); + stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION); +} + +static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 vid, u16 flags) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + + return ocelot_mact_learn(ocelot, port->chip_port, addr, vid, + ENTRYTYPE_NORMAL); +} + +static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + + return ocelot_mact_forget(ocelot, addr, vid); +} + +struct ocelot_dump_ctx { + struct net_device *dev; + struct sk_buff *skb; + struct netlink_callback *cb; + int idx; +}; + +static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry, + struct ocelot_dump_ctx *dump) +{ + u32 portid = NETLINK_CB(dump->cb->skb).portid; + u32 seq = dump->cb->nlh->nlmsg_seq; + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + if (dump->idx < dump->cb->args[2]) + goto skip; + + nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, + sizeof(*ndm), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; + ndm->ndm_type = 0; + ndm->ndm_ifindex = dump->dev->ifindex; + ndm->ndm_state = NUD_REACHABLE; + + if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac)) + goto nla_put_failure; + + if (entry->vid && nla_put_u16(dump->skb, NDA_VLAN, entry->vid)) + goto nla_put_failure; + + nlmsg_end(dump->skb, nlh); + +skip: + dump->idx++; + return 0; + +nla_put_failure: + nlmsg_cancel(dump->skb, nlh); + return -EMSGSIZE; +} + +static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col, + struct ocelot_mact_entry *entry) +{ + struct ocelot *ocelot = port->ocelot; + char mac[ETH_ALEN]; + u32 val, dst, macl, mach; + + 
/* Set row and column to read from */ + ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row); + ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col); + + /* Issue a read command */ + ocelot_write(ocelot, + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ), + ANA_TABLES_MACACCESS); + + if (ocelot_mact_wait_for_completion(ocelot)) + return -ETIMEDOUT; + + /* Read the entry flags */ + val = ocelot_read(ocelot, ANA_TABLES_MACACCESS); + if (!(val & ANA_TABLES_MACACCESS_VALID)) + return -EINVAL; + + /* If the entry read has another port configured as its destination, + * do not report it. + */ + dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3; + if (dst != port->chip_port) + return -EINVAL; + + /* Get the entry's MAC address and VLAN id */ + macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA); + mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA); + + mac[0] = (mach >> 8) & 0xff; + mac[1] = (mach >> 0) & 0xff; + mac[2] = (macl >> 24) & 0xff; + mac[3] = (macl >> 16) & 0xff; + mac[4] = (macl >> 8) & 0xff; + mac[5] = (macl >> 0) & 0xff; + + entry->vid = (mach >> 16) & 0xfff; + ether_addr_copy(entry->mac, mac); + + return 0; +} + +static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, int *idx) +{ + struct ocelot_port *port = netdev_priv(dev); + int i, j, ret = 0; + struct ocelot_dump_ctx dump = { + .dev = dev, + .skb = skb, + .cb = cb, + .idx = *idx, + }; + + struct ocelot_mact_entry entry; + + /* Loop through all the mac tables entries. There are 1024 rows of 4 + * entries. + */ + for (i = 0; i < 1024; i++) { + for (j = 0; j < 4; j++) { + ret = ocelot_mact_read(port, i, j, &entry); + /* If the entry is invalid (wrong port, invalid...), + * skip it. + */ + if (ret == -EINVAL) + continue; + else if (ret) + goto end; + + ret = ocelot_fdb_do_dump(&entry, &dump); + if (ret) + goto end; + } + } + +end: + *idx = dump.idx; + return ret; +} + +static const struct net_device_ops ocelot_port_netdev_ops = { + .ndo_open = ocelot_port_open, + .ndo_stop = ocelot_port_stop, + .ndo_start_xmit = ocelot_port_xmit, + .ndo_set_rx_mode = ocelot_set_rx_mode, + .ndo_get_phys_port_name = ocelot_port_get_phys_port_name, + .ndo_set_mac_address = ocelot_port_set_mac_address, + .ndo_get_stats64 = ocelot_get_stats64, + .ndo_fdb_add = ocelot_fdb_add, + .ndo_fdb_del = ocelot_fdb_del, + .ndo_fdb_dump = ocelot_fdb_dump, +}; + +static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct ocelot_port *port = netdev_priv(netdev); + struct ocelot *ocelot = port->ocelot; + int i; + + if (sset != ETH_SS_STATS) + return; + + for (i = 0; i < ocelot->num_stats; i++) + memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name, + ETH_GSTRING_LEN); +} + +static void ocelot_check_stats(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); + int i, j; + + mutex_lock(&ocelot->stats_lock); + + for (i = 0; i < ocelot->num_phys_ports; i++) { + /* Configure the port to read the stats from */ + ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG); + + for (j = 0; j < ocelot->num_stats; j++) { + u32 val; + unsigned int idx = i * ocelot->num_stats + j; + + val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS, + ocelot->stats_layout[j].offset); + + if (val < (ocelot->stats[idx] & U32_MAX)) + ocelot->stats[idx] += (u64)1 << 32; + + ocelot->stats[idx] = (ocelot->stats[idx] & + ~(u64)U32_MAX) + val; + } + } + + 
cancel_delayed_work(&ocelot->stats_work); + queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, + OCELOT_STATS_CHECK_DELAY); + + mutex_unlock(&ocelot->stats_lock); +} + +static void ocelot_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int i; + + /* check and update now */ + ocelot_check_stats(&ocelot->stats_work.work); + + /* Copy all counters */ + for (i = 0; i < ocelot->num_stats; i++) + *data++ = ocelot->stats[port->chip_port * ocelot->num_stats + i]; +} + +static int ocelot_get_sset_count(struct net_device *dev, int sset) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + return ocelot->num_stats; +} + +static const struct ethtool_ops ocelot_ethtool_ops = { + .get_strings = ocelot_get_strings, + .get_ethtool_stats = ocelot_get_ethtool_stats, + .get_sset_count = ocelot_get_sset_count, +}; + +static int ocelot_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + struct ocelot_port *ocelot_port = netdev_priv(dev); + struct ocelot *ocelot = ocelot_port->ocelot; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = sizeof(ocelot->base_mac); + memcpy(&attr->u.ppid.id, &ocelot->base_mac, + attr->u.ppid.id_len); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port, + struct switchdev_trans *trans, + u8 state) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + u32 port_cfg; + int port, i; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (!(BIT(ocelot_port->chip_port) & ocelot->bridge_mask)) + return 0; + + port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, + ocelot_port->chip_port); + + switch (state) { + case BR_STATE_FORWARDING: + ocelot->bridge_fwd_mask |= BIT(ocelot_port->chip_port); + /* Fallthrough */ + case BR_STATE_LEARNING: + port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA; + break; + + default: + port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA; + ocelot->bridge_fwd_mask &= ~BIT(ocelot_port->chip_port); + break; + } + + ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, + ocelot_port->chip_port); + + /* Apply FWD mask. The loop is needed to add/remove the current port as + * a source for the other ports. + */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + if (ocelot->bridge_fwd_mask & BIT(port)) { + unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(port); + + for (i = 0; i < ocelot->num_phys_ports; i++) { + unsigned long bond_mask = ocelot->lags[i]; + + if (!bond_mask) + continue; + + if (bond_mask & BIT(port)) { + mask &= ~bond_mask; + break; + } + } + + ocelot_write_rix(ocelot, + BIT(ocelot->num_phys_ports) | mask, + ANA_PGID_PGID, PGID_SRC + port); + } else { + /* Only the CPU port, this is compatible with link + * aggregation. 
+ */ + ocelot_write_rix(ocelot, + BIT(ocelot->num_phys_ports), + ANA_PGID_PGID, PGID_SRC + port); + } + } + + return 0; +} + +static void ocelot_port_attr_ageing_set(struct ocelot_port *ocelot_port, + unsigned long ageing_clock_t) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; + + ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(ageing_time / 2), + ANA_AUTOAGE); +} + +static void ocelot_port_attr_mc_set(struct ocelot_port *port, bool mc) +{ + struct ocelot *ocelot = port->ocelot; + u32 val = ocelot_read_gix(ocelot, ANA_PORT_CPU_FWD_CFG, + port->chip_port); + + if (mc) + val |= ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA; + else + val &= ~(ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA); + + ocelot_write_gix(ocelot, val, ANA_PORT_CPU_FWD_CFG, port->chip_port); +} + +static int ocelot_port_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct ocelot_port *ocelot_port = netdev_priv(dev); + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + ocelot_port_attr_stp_state_set(ocelot_port, trans, + attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: + ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, + const unsigned char *addr, + u16 vid) +{ + struct ocelot_multicast *mc; + + list_for_each_entry(mc, &ocelot->multicast, list) { + if (ether_addr_equal(mc->addr, addr) && mc->vid == vid) + return mc; + } + + return NULL; +} + +static int ocelot_port_obj_add_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + struct ocelot_multicast *mc; + unsigned char addr[ETH_ALEN]; + u16 vid = mdb->vid; + bool new = false; + + if (!vid) + vid = 1; + + mc = ocelot_multicast_get(ocelot, mdb->addr, vid); + if (!mc) { + mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL); + if (!mc) + return -ENOMEM; + + memcpy(mc->addr, mdb->addr, ETH_ALEN); + mc->vid = vid; + + list_add_tail(&mc->list, &ocelot->multicast); + new = true; + } + + memcpy(addr, mc->addr, ETH_ALEN); + addr[0] = 0; + + if (!new) { + addr[2] = mc->ports << 0; + addr[1] = mc->ports << 8; + ocelot_mact_forget(ocelot, addr, vid); + } + + mc->ports |= BIT(port->chip_port); + addr[2] = mc->ports << 0; + addr[1] = mc->ports << 8; + + return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); +} + +static int ocelot_port_obj_del_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + struct ocelot_multicast *mc; + unsigned char addr[ETH_ALEN]; + u16 vid = mdb->vid; + + if (!vid) + vid = 1; + + mc = ocelot_multicast_get(ocelot, mdb->addr, vid); + if (!mc) + return -ENOENT; + + memcpy(addr, mc->addr, ETH_ALEN); + addr[2] = mc->ports << 0; + addr[1] = mc->ports << 8; + addr[0] = 0; + ocelot_mact_forget(ocelot, addr, vid); + + 
mc->ports &= ~BIT(port->chip_port); + if (!mc->ports) { + list_del(&mc->list); + devm_kfree(ocelot->dev, mc); + return 0; + } + + addr[2] = mc->ports << 0; + addr[1] = mc->ports << 8; + + return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); +} + +static int ocelot_port_obj_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans) +{ + int ret = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_MDB: + ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj), + trans); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int ocelot_port_obj_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + int ret = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_MDB: + ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static const struct switchdev_ops ocelot_port_switchdev_ops = { + .switchdev_port_attr_get = ocelot_port_attr_get, + .switchdev_port_attr_set = ocelot_port_attr_set, + .switchdev_port_obj_add = ocelot_port_obj_add, + .switchdev_port_obj_del = ocelot_port_obj_del, +}; + +static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port, + struct net_device *bridge) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + + if (!ocelot->bridge_mask) { + ocelot->hw_bridge_dev = bridge; + } else { + if (ocelot->hw_bridge_dev != bridge) + /* This is adding the port to a second bridge, this is + * unsupported */ + return -ENODEV; + } + + ocelot->bridge_mask |= BIT(ocelot_port->chip_port); + + return 0; +} + +static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port, + struct net_device *bridge) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + + ocelot->bridge_mask &= ~BIT(ocelot_port->chip_port); + + if (!ocelot->bridge_mask) + ocelot->hw_bridge_dev = NULL; +} + +/* Checks if the net_device instance given to us originate from our driver. 
*/ +static bool ocelot_netdevice_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &ocelot_port_netdev_ops; +} + +static int ocelot_netdevice_port_event(struct net_device *dev, + unsigned long event, + struct netdev_notifier_changeupper_info *info) +{ + struct ocelot_port *ocelot_port = netdev_priv(dev); + int err = 0; + + if (!ocelot_netdevice_dev_check(dev)) + return 0; + + switch (event) { + case NETDEV_CHANGEUPPER: + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + err = ocelot_port_bridge_join(ocelot_port, + info->upper_dev); + else + ocelot_port_bridge_leave(ocelot_port, + info->upper_dev); + } + break; + default: + break; + } + + return err; +} + +static int ocelot_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct netdev_notifier_changeupper_info *info = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int ret; + + if (netif_is_lag_master(dev)) { + struct net_device *slave; + struct list_head *iter; + + netdev_for_each_lower_dev(dev, slave, iter) { + ret = ocelot_netdevice_port_event(slave, event, info); + if (ret) + goto notify; + } + } else { + ret = ocelot_netdevice_port_event(dev, event, info); + } + +notify: + return notifier_from_errno(ret); +} + +struct notifier_block ocelot_netdevice_nb __read_mostly = { + .notifier_call = ocelot_netdevice_event, +}; +EXPORT_SYMBOL(ocelot_netdevice_nb); + +int ocelot_probe_port(struct ocelot *ocelot, u8 port, + void __iomem *regs, + struct phy_device *phy) +{ + struct ocelot_port *ocelot_port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(struct ocelot_port)); + if (!dev) + return -ENOMEM; + SET_NETDEV_DEV(dev, ocelot->dev); + ocelot_port = netdev_priv(dev); + ocelot_port->dev = dev; + ocelot_port->ocelot = ocelot; + ocelot_port->regs = regs; + ocelot_port->chip_port = port; + ocelot_port->phy = phy; + INIT_LIST_HEAD(&ocelot_port->mc); + ocelot->ports[port] = ocelot_port; + + dev->netdev_ops = &ocelot_port_netdev_ops; + dev->ethtool_ops = &ocelot_ethtool_ops; + dev->switchdev_ops = &ocelot_port_switchdev_ops; + + memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); + dev->dev_addr[ETH_ALEN - 1] += port; + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, + ENTRYTYPE_LOCKED); + + err = register_netdev(dev); + if (err) { + dev_err(ocelot->dev, "register_netdev failed\n"); + goto err_register_netdev; + } + + return 0; + +err_register_netdev: + free_netdev(dev); + return err; +} +EXPORT_SYMBOL(ocelot_probe_port); + +int ocelot_init(struct ocelot *ocelot) +{ + u32 port; + int i, cpu = ocelot->num_phys_ports; + char queue_name[32]; + + ocelot->stats = devm_kcalloc(ocelot->dev, + ocelot->num_phys_ports * ocelot->num_stats, + sizeof(u64), GFP_KERNEL); + if (!ocelot->stats) + return -ENOMEM; + + mutex_init(&ocelot->stats_lock); + snprintf(queue_name, sizeof(queue_name), "%s-stats", + dev_name(ocelot->dev)); + ocelot->stats_queue = create_singlethread_workqueue(queue_name); + if (!ocelot->stats_queue) + return -ENOMEM; + + ocelot_mact_init(ocelot); + ocelot_vlan_init(ocelot); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + /* Clear all counters (5 groups) */ + ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) | + SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f), + SYS_STAT_CFG); + } + + /* Only use S-Tag */ + ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG); + + /* Aggregation mode */ + ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA | + ANA_AGGR_CFG_AC_DMAC_ENA | + ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA | + 
ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, ANA_AGGR_CFG); + + /* Set MAC age time to default value. The entry is aged after + * 2*AGE_PERIOD + */ + ocelot_write(ocelot, + ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ), + ANA_AUTOAGE); + + /* Disable learning for frames discarded by VLAN ingress filtering */ + regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1); + + /* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */ + ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA | + SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING); + + /* Setup flooding PGIDs */ + ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | + ANA_FLOODING_FLD_BROADCAST(PGID_MC) | + ANA_FLOODING_FLD_UNICAST(PGID_UC), + ANA_FLOODING, 0); + ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) | + ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) | + ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) | + ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC), + ANA_FLOODING_IPMC); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + /* Transmit the frame to the local port. */ + ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); + /* Do not forward BPDU frames to the front ports. */ + ocelot_write_gix(ocelot, + ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), + ANA_PORT_CPU_FWD_BPDU_CFG, + port); + /* Ensure bridging is disabled */ + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port); + } + + /* Configure and enable the CPU port. */ + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu); + ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU); + ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA | + ANA_PORT_PORT_CFG_PORTID_VAL(cpu), + ANA_PORT_PORT_CFG, cpu); + + /* Allow broadcast MAC frames. */ + for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) { + u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); + + ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); + } + ocelot_write_rix(ocelot, + ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)), + ANA_PGID_PGID, PGID_MC); + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4); + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6); + + /* CPU port Injection/Extraction configuration */ + ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | + QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | + QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE, cpu); + ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(1) | + SYS_PORT_MODE_INCL_INJ_HDR(1), SYS_PORT_MODE, cpu); + /* Allow manual injection via DEVCPU_QS registers, and byte swap these + * registers endianness. 
+ */ + ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP | + QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0); + ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP | + QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0); + ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) | + ANA_CPUQ_CFG_CPUQ_LRN(2) | + ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) | + ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) | + ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) | + ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) | + ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) | + ANA_CPUQ_CFG_CPUQ_IGMP(6) | + ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG); + for (i = 0; i < 16; i++) + ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) | + ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), + ANA_CPUQ_8021_CFG, i); + + INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats); + queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, + OCELOT_STATS_CHECK_DELAY); + return 0; +} +EXPORT_SYMBOL(ocelot_init); + +void ocelot_deinit(struct ocelot *ocelot) +{ + destroy_workqueue(ocelot->stats_queue); + mutex_destroy(&ocelot->stats_lock); +} +EXPORT_SYMBOL(ocelot_deinit); + +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h new file mode 100644 index 000000000000..097bd12a10d4 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -0,0 +1,572 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_H_ +#define _MSCC_OCELOT_H_ + +#include +#include +#include +#include +#include + +#include "ocelot_ana.h" +#include "ocelot_dev.h" +#include "ocelot_hsio.h" +#include "ocelot_qsys.h" +#include "ocelot_rew.h" +#include "ocelot_sys.h" +#include "ocelot_qs.h" + +#define PGID_AGGR 64 +#define PGID_SRC 80 + +/* Reserved PGIDs */ +#define PGID_CPU (PGID_AGGR - 5) +#define PGID_UC (PGID_AGGR - 4) +#define PGID_MC (PGID_AGGR - 3) +#define PGID_MCIPV4 (PGID_AGGR - 2) +#define PGID_MCIPV6 (PGID_AGGR - 1) + +#define OCELOT_BUFFER_CELL_SZ 60 + +#define OCELOT_STATS_CHECK_DELAY (2 * HZ) + +#define IFH_LEN 4 + +struct frame_info { + u32 len; + u16 port; + u16 vid; + u8 cpuq; + u8 tag_type; +}; + +#define IFH_INJ_BYPASS BIT(31) +#define IFH_INJ_POP_CNT_DISABLE (3 << 28) + +#define IFH_TAG_TYPE_C 0 +#define IFH_TAG_TYPE_S 1 + +#define OCELOT_SPEED_2500 0 +#define OCELOT_SPEED_1000 1 +#define OCELOT_SPEED_100 2 +#define OCELOT_SPEED_10 3 + +#define TARGET_OFFSET 24 +#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0) +#define REG(reg, offset) [reg & REG_MASK] = offset + +enum ocelot_target { + ANA = 1, + QS, + QSYS, + REW, + SYS, + HSIO, + TARGET_MAX, +}; + +enum ocelot_reg { + ANA_ADVLEARN = ANA << TARGET_OFFSET, + ANA_VLANMASK, + ANA_PORT_B_DOMAIN, + ANA_ANAGEFIL, + ANA_ANEVENTS, + ANA_STORMLIMIT_BURST, + ANA_STORMLIMIT_CFG, + ANA_ISOLATED_PORTS, + ANA_COMMUNITY_PORTS, + ANA_AUTOAGE, + ANA_MACTOPTIONS, + ANA_LEARNDISC, + ANA_AGENCTRL, + ANA_MIRRORPORTS, + ANA_EMIRRORPORTS, + ANA_FLOODING, + ANA_FLOODING_IPMC, + ANA_SFLOW_CFG, + ANA_PORT_MODE, + ANA_CUT_THRU_CFG, + ANA_PGID_PGID, + ANA_TABLES_ANMOVED, + ANA_TABLES_MACHDATA, + ANA_TABLES_MACLDATA, + ANA_TABLES_STREAMDATA, + ANA_TABLES_MACACCESS, + ANA_TABLES_MACTINDX, + ANA_TABLES_VLANACCESS, + ANA_TABLES_VLANTIDX, + ANA_TABLES_ISDXACCESS, + ANA_TABLES_ISDXTIDX, + ANA_TABLES_ENTRYLIM, + ANA_TABLES_PTP_ID_HIGH, + ANA_TABLES_PTP_ID_LOW, + ANA_TABLES_STREAMACCESS, + ANA_TABLES_STREAMTIDX, + ANA_TABLES_SEQ_HISTORY, + ANA_TABLES_SEQ_MASK, + ANA_TABLES_SFID_MASK, + ANA_TABLES_SFIDACCESS, + ANA_TABLES_SFIDTIDX, + 
ANA_MSTI_STATE, + ANA_OAM_UPM_LM_CNT, + ANA_SG_ACCESS_CTRL, + ANA_SG_CONFIG_REG_1, + ANA_SG_CONFIG_REG_2, + ANA_SG_CONFIG_REG_3, + ANA_SG_CONFIG_REG_4, + ANA_SG_CONFIG_REG_5, + ANA_SG_GCL_GS_CONFIG, + ANA_SG_GCL_TI_CONFIG, + ANA_SG_STATUS_REG_1, + ANA_SG_STATUS_REG_2, + ANA_SG_STATUS_REG_3, + ANA_PORT_VLAN_CFG, + ANA_PORT_DROP_CFG, + ANA_PORT_QOS_CFG, + ANA_PORT_VCAP_CFG, + ANA_PORT_VCAP_S1_KEY_CFG, + ANA_PORT_VCAP_S2_CFG, + ANA_PORT_PCP_DEI_MAP, + ANA_PORT_CPU_FWD_CFG, + ANA_PORT_CPU_FWD_BPDU_CFG, + ANA_PORT_CPU_FWD_GARP_CFG, + ANA_PORT_CPU_FWD_CCM_CFG, + ANA_PORT_PORT_CFG, + ANA_PORT_POL_CFG, + ANA_PORT_PTP_CFG, + ANA_PORT_PTP_DLY1_CFG, + ANA_PORT_PTP_DLY2_CFG, + ANA_PORT_SFID_CFG, + ANA_PFC_PFC_CFG, + ANA_PFC_PFC_TIMER, + ANA_IPT_OAM_MEP_CFG, + ANA_IPT_IPT, + ANA_PPT_PPT, + ANA_FID_MAP_FID_MAP, + ANA_AGGR_CFG, + ANA_CPUQ_CFG, + ANA_CPUQ_CFG2, + ANA_CPUQ_8021_CFG, + ANA_DSCP_CFG, + ANA_DSCP_REWR_CFG, + ANA_VCAP_RNG_TYPE_CFG, + ANA_VCAP_RNG_VAL_CFG, + ANA_VRAP_CFG, + ANA_VRAP_HDR_DATA, + ANA_VRAP_HDR_MASK, + ANA_DISCARD_CFG, + ANA_FID_CFG, + ANA_POL_PIR_CFG, + ANA_POL_CIR_CFG, + ANA_POL_MODE_CFG, + ANA_POL_PIR_STATE, + ANA_POL_CIR_STATE, + ANA_POL_STATE, + ANA_POL_FLOWC, + ANA_POL_HYST, + ANA_POL_MISC_CFG, + QS_XTR_GRP_CFG = QS << TARGET_OFFSET, + QS_XTR_RD, + QS_XTR_FRM_PRUNING, + QS_XTR_FLUSH, + QS_XTR_DATA_PRESENT, + QS_XTR_CFG, + QS_INJ_GRP_CFG, + QS_INJ_WR, + QS_INJ_CTRL, + QS_INJ_STATUS, + QS_INJ_ERR, + QS_INH_DBG, + QSYS_PORT_MODE = QSYS << TARGET_OFFSET, + QSYS_SWITCH_PORT_MODE, + QSYS_STAT_CNT_CFG, + QSYS_EEE_CFG, + QSYS_EEE_THRES, + QSYS_IGR_NO_SHARING, + QSYS_EGR_NO_SHARING, + QSYS_SW_STATUS, + QSYS_EXT_CPU_CFG, + QSYS_PAD_CFG, + QSYS_CPU_GROUP_MAP, + QSYS_QMAP, + QSYS_ISDX_SGRP, + QSYS_TIMED_FRAME_ENTRY, + QSYS_TFRM_MISC, + QSYS_TFRM_PORT_DLY, + QSYS_TFRM_TIMER_CFG_1, + QSYS_TFRM_TIMER_CFG_2, + QSYS_TFRM_TIMER_CFG_3, + QSYS_TFRM_TIMER_CFG_4, + QSYS_TFRM_TIMER_CFG_5, + QSYS_TFRM_TIMER_CFG_6, + QSYS_TFRM_TIMER_CFG_7, + QSYS_TFRM_TIMER_CFG_8, + QSYS_RED_PROFILE, + QSYS_RES_QOS_MODE, + QSYS_RES_CFG, + QSYS_RES_STAT, + QSYS_EGR_DROP_MODE, + QSYS_EQ_CTRL, + QSYS_EVENTS_CORE, + QSYS_QMAXSDU_CFG_0, + QSYS_QMAXSDU_CFG_1, + QSYS_QMAXSDU_CFG_2, + QSYS_QMAXSDU_CFG_3, + QSYS_QMAXSDU_CFG_4, + QSYS_QMAXSDU_CFG_5, + QSYS_QMAXSDU_CFG_6, + QSYS_QMAXSDU_CFG_7, + QSYS_PREEMPTION_CFG, + QSYS_CIR_CFG, + QSYS_EIR_CFG, + QSYS_SE_CFG, + QSYS_SE_DWRR_CFG, + QSYS_SE_CONNECT, + QSYS_SE_DLB_SENSE, + QSYS_CIR_STATE, + QSYS_EIR_STATE, + QSYS_SE_STATE, + QSYS_HSCH_MISC_CFG, + QSYS_TAG_CONFIG, + QSYS_TAS_PARAM_CFG_CTRL, + QSYS_PORT_MAX_SDU, + QSYS_PARAM_CFG_REG_1, + QSYS_PARAM_CFG_REG_2, + QSYS_PARAM_CFG_REG_3, + QSYS_PARAM_CFG_REG_4, + QSYS_PARAM_CFG_REG_5, + QSYS_GCL_CFG_REG_1, + QSYS_GCL_CFG_REG_2, + QSYS_PARAM_STATUS_REG_1, + QSYS_PARAM_STATUS_REG_2, + QSYS_PARAM_STATUS_REG_3, + QSYS_PARAM_STATUS_REG_4, + QSYS_PARAM_STATUS_REG_5, + QSYS_PARAM_STATUS_REG_6, + QSYS_PARAM_STATUS_REG_7, + QSYS_PARAM_STATUS_REG_8, + QSYS_PARAM_STATUS_REG_9, + QSYS_GCL_STATUS_REG_1, + QSYS_GCL_STATUS_REG_2, + REW_PORT_VLAN_CFG = REW << TARGET_OFFSET, + REW_TAG_CFG, + REW_PORT_CFG, + REW_DSCP_CFG, + REW_PCP_DEI_QOS_MAP_CFG, + REW_PTP_CFG, + REW_PTP_DLY1_CFG, + REW_RED_TAG_CFG, + REW_DSCP_REMAP_DP1_CFG, + REW_DSCP_REMAP_CFG, + REW_STAT_CFG, + REW_REW_STICKY, + REW_PPT, + SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET, + SYS_COUNT_RX_UNICAST, + SYS_COUNT_RX_MULTICAST, + SYS_COUNT_RX_BROADCAST, + SYS_COUNT_RX_SHORTS, + SYS_COUNT_RX_FRAGMENTS, + SYS_COUNT_RX_JABBERS, + SYS_COUNT_RX_CRC_ALIGN_ERRS, + SYS_COUNT_RX_SYM_ERRS, + 
SYS_COUNT_RX_64, + SYS_COUNT_RX_65_127, + SYS_COUNT_RX_128_255, + SYS_COUNT_RX_256_1023, + SYS_COUNT_RX_1024_1526, + SYS_COUNT_RX_1527_MAX, + SYS_COUNT_RX_PAUSE, + SYS_COUNT_RX_CONTROL, + SYS_COUNT_RX_LONGS, + SYS_COUNT_RX_CLASSIFIED_DROPS, + SYS_COUNT_TX_OCTETS, + SYS_COUNT_TX_UNICAST, + SYS_COUNT_TX_MULTICAST, + SYS_COUNT_TX_BROADCAST, + SYS_COUNT_TX_COLLISION, + SYS_COUNT_TX_DROPS, + SYS_COUNT_TX_PAUSE, + SYS_COUNT_TX_64, + SYS_COUNT_TX_65_127, + SYS_COUNT_TX_128_511, + SYS_COUNT_TX_512_1023, + SYS_COUNT_TX_1024_1526, + SYS_COUNT_TX_1527_MAX, + SYS_COUNT_TX_AGING, + SYS_RESET_CFG, + SYS_SR_ETYPE_CFG, + SYS_VLAN_ETYPE_CFG, + SYS_PORT_MODE, + SYS_FRONT_PORT_MODE, + SYS_FRM_AGING, + SYS_STAT_CFG, + SYS_SW_STATUS, + SYS_MISC_CFG, + SYS_REW_MAC_HIGH_CFG, + SYS_REW_MAC_LOW_CFG, + SYS_TIMESTAMP_OFFSET, + SYS_CMID, + SYS_PAUSE_CFG, + SYS_PAUSE_TOT_CFG, + SYS_ATOP, + SYS_ATOP_TOT_CFG, + SYS_MAC_FC_CFG, + SYS_MMGT, + SYS_MMGT_FAST, + SYS_EVENTS_DIF, + SYS_EVENTS_CORE, + SYS_CNT, + SYS_PTP_STATUS, + SYS_PTP_TXSTAMP, + SYS_PTP_NXT, + SYS_PTP_CFG, + SYS_RAM_INIT, + SYS_CM_ADDR, + SYS_CM_DATA_WR, + SYS_CM_DATA_RD, + SYS_CM_OP, + SYS_CM_DATA, + HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET, + HSIO_PLL5G_CFG1, + HSIO_PLL5G_CFG2, + HSIO_PLL5G_CFG3, + HSIO_PLL5G_CFG4, + HSIO_PLL5G_CFG5, + HSIO_PLL5G_CFG6, + HSIO_PLL5G_STATUS0, + HSIO_PLL5G_STATUS1, + HSIO_PLL5G_BIST_CFG0, + HSIO_PLL5G_BIST_CFG1, + HSIO_PLL5G_BIST_CFG2, + HSIO_PLL5G_BIST_STAT0, + HSIO_PLL5G_BIST_STAT1, + HSIO_RCOMP_CFG0, + HSIO_RCOMP_STATUS, + HSIO_SYNC_ETH_CFG, + HSIO_SYNC_ETH_PLL_CFG, + HSIO_S1G_DES_CFG, + HSIO_S1G_IB_CFG, + HSIO_S1G_OB_CFG, + HSIO_S1G_SER_CFG, + HSIO_S1G_COMMON_CFG, + HSIO_S1G_PLL_CFG, + HSIO_S1G_PLL_STATUS, + HSIO_S1G_DFT_CFG0, + HSIO_S1G_DFT_CFG1, + HSIO_S1G_DFT_CFG2, + HSIO_S1G_TP_CFG, + HSIO_S1G_RC_PLL_BIST_CFG, + HSIO_S1G_MISC_CFG, + HSIO_S1G_DFT_STATUS, + HSIO_S1G_MISC_STATUS, + HSIO_MCB_S1G_ADDR_CFG, + HSIO_S6G_DIG_CFG, + HSIO_S6G_DFT_CFG0, + HSIO_S6G_DFT_CFG1, + HSIO_S6G_DFT_CFG2, + HSIO_S6G_TP_CFG0, + HSIO_S6G_TP_CFG1, + HSIO_S6G_RC_PLL_BIST_CFG, + HSIO_S6G_MISC_CFG, + HSIO_S6G_OB_ANEG_CFG, + HSIO_S6G_DFT_STATUS, + HSIO_S6G_ERR_CNT, + HSIO_S6G_MISC_STATUS, + HSIO_S6G_DES_CFG, + HSIO_S6G_IB_CFG, + HSIO_S6G_IB_CFG1, + HSIO_S6G_IB_CFG2, + HSIO_S6G_IB_CFG3, + HSIO_S6G_IB_CFG4, + HSIO_S6G_IB_CFG5, + HSIO_S6G_OB_CFG, + HSIO_S6G_OB_CFG1, + HSIO_S6G_SER_CFG, + HSIO_S6G_COMMON_CFG, + HSIO_S6G_PLL_CFG, + HSIO_S6G_ACJTAG_CFG, + HSIO_S6G_GP_CFG, + HSIO_S6G_IB_STATUS0, + HSIO_S6G_IB_STATUS1, + HSIO_S6G_ACJTAG_STATUS, + HSIO_S6G_PLL_STATUS, + HSIO_S6G_REVID, + HSIO_MCB_S6G_ADDR_CFG, + HSIO_HW_CFG, + HSIO_HW_QSGMII_CFG, + HSIO_HW_QSGMII_STAT, + HSIO_CLK_CFG, + HSIO_TEMP_SENSOR_CTRL, + HSIO_TEMP_SENSOR_CFG, + HSIO_TEMP_SENSOR_STAT, +}; + +enum ocelot_regfield { + ANA_ADVLEARN_VLAN_CHK, + ANA_ADVLEARN_LEARN_MIRROR, + ANA_ANEVENTS_FLOOD_DISCARD, + ANA_ANEVENTS_MSTI_DROP, + ANA_ANEVENTS_ACLKILL, + ANA_ANEVENTS_ACLUSED, + ANA_ANEVENTS_AUTOAGE, + ANA_ANEVENTS_VS2TTL1, + ANA_ANEVENTS_STORM_DROP, + ANA_ANEVENTS_LEARN_DROP, + ANA_ANEVENTS_AGED_ENTRY, + ANA_ANEVENTS_CPU_LEARN_FAILED, + ANA_ANEVENTS_AUTO_LEARN_FAILED, + ANA_ANEVENTS_LEARN_REMOVE, + ANA_ANEVENTS_AUTO_LEARNED, + ANA_ANEVENTS_AUTO_MOVED, + ANA_ANEVENTS_DROPPED, + ANA_ANEVENTS_CLASSIFIED_DROP, + ANA_ANEVENTS_CLASSIFIED_COPY, + ANA_ANEVENTS_VLAN_DISCARD, + ANA_ANEVENTS_FWD_DISCARD, + ANA_ANEVENTS_MULTICAST_FLOOD, + ANA_ANEVENTS_UNICAST_FLOOD, + ANA_ANEVENTS_DEST_KNOWN, + ANA_ANEVENTS_BUCKET3_MATCH, + ANA_ANEVENTS_BUCKET2_MATCH, + ANA_ANEVENTS_BUCKET1_MATCH, + 
ANA_ANEVENTS_BUCKET0_MATCH, + ANA_ANEVENTS_CPU_OPERATION, + ANA_ANEVENTS_DMAC_LOOKUP, + ANA_ANEVENTS_SMAC_LOOKUP, + ANA_ANEVENTS_SEQ_GEN_ERR_0, + ANA_ANEVENTS_SEQ_GEN_ERR_1, + ANA_TABLES_MACACCESS_B_DOM, + ANA_TABLES_MACTINDX_BUCKET, + ANA_TABLES_MACTINDX_M_INDEX, + QSYS_TIMED_FRAME_ENTRY_TFRM_VLD, + QSYS_TIMED_FRAME_ENTRY_TFRM_FP, + QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO, + QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL, + QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T, + SYS_RESET_CFG_CORE_ENA, + SYS_RESET_CFG_MEM_ENA, + SYS_RESET_CFG_MEM_INIT, + REGFIELD_MAX +}; + +struct ocelot_multicast { + struct list_head list; + unsigned char addr[ETH_ALEN]; + u16 vid; + u16 ports; +}; + +struct ocelot_port; + +struct ocelot_stat_layout { + u32 offset; + char name[ETH_GSTRING_LEN]; +}; + +struct ocelot { + struct device *dev; + + struct regmap *targets[TARGET_MAX]; + struct regmap_field *regfields[REGFIELD_MAX]; + const u32 *const *map; + const struct ocelot_stat_layout *stats_layout; + unsigned int num_stats; + + u8 base_mac[ETH_ALEN]; + + struct net_device *hw_bridge_dev; + u16 bridge_mask; + u16 bridge_fwd_mask; + + struct workqueue_struct *ocelot_owq; + + int shared_queue_sz; + + u8 num_phys_ports; + u8 num_cpu_ports; + struct ocelot_port **ports; + + u16 lags[16]; + + /* Keep track of the vlan port masks */ + u32 vlan_mask[VLAN_N_VID]; + + struct list_head multicast; + + /* Workqueue to check statistics for overflow with its lock */ + struct mutex stats_lock; + u64 *stats; + struct delayed_work stats_work; + struct workqueue_struct *stats_queue; +}; + +struct ocelot_port { + struct net_device *dev; + struct ocelot *ocelot; + struct phy_device *phy; + void __iomem *regs; + u8 chip_port; + /* Keep a track of the mc addresses added to the mac table, so that they + * can be removed when needed. 
+ */ + struct list_head mc; + + /* Ingress default VLAN (pvid) */ + u16 pvid; + + /* Egress default VLAN (vid) */ + u16 vid; + + u8 vlan_aware; + + u64 *stats; +}; + +u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset); +#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) +#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi)) +#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri)) +#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0) + +void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset); +#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) +#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi)) +#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri)) +#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0) + +void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask, + u32 offset); +#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) +#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi)) +#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri)) +#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0) + +u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); +void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); + +int ocelot_regfields_init(struct ocelot *ocelot, + const struct reg_field *const regfields); +struct regmap *ocelot_io_platform_init(struct ocelot *ocelot, + struct platform_device *pdev, + const char *name); + +#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) +#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) + +int ocelot_init(struct ocelot *ocelot); +void ocelot_deinit(struct ocelot *ocelot); +int ocelot_chip_init(struct ocelot *ocelot); +int ocelot_probe_port(struct ocelot *ocelot, u8 port, + void __iomem *regs, + struct phy_device *phy); + +extern struct notifier_block ocelot_netdevice_nb; + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_ana.h b/drivers/net/ethernet/mscc/ocelot_ana.h new file mode 100644 index 000000000000..841c6ec22b64 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_ana.h @@ -0,0 +1,625 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_ANA_H_ +#define _MSCC_OCELOT_ANA_H_ + +#define ANA_ANAGEFIL_B_DOM_EN BIT(22) +#define ANA_ANAGEFIL_B_DOM_VAL BIT(21) +#define ANA_ANAGEFIL_AGE_LOCKED BIT(20) +#define ANA_ANAGEFIL_PID_EN BIT(19) +#define ANA_ANAGEFIL_PID_VAL(x) (((x) << 14) & GENMASK(18, 14)) +#define ANA_ANAGEFIL_PID_VAL_M GENMASK(18, 14) +#define ANA_ANAGEFIL_PID_VAL_X(x) (((x) & GENMASK(18, 14)) >> 14) +#define ANA_ANAGEFIL_VID_EN BIT(13) +#define ANA_ANAGEFIL_VID_VAL(x) ((x) & GENMASK(12, 0)) +#define ANA_ANAGEFIL_VID_VAL_M GENMASK(12, 0) + +#define ANA_STORMLIMIT_CFG_RSZ 0x4 + +#define ANA_STORMLIMIT_CFG_STORM_RATE(x) (((x) << 3) & GENMASK(6, 3)) +#define ANA_STORMLIMIT_CFG_STORM_RATE_M GENMASK(6, 3) +#define ANA_STORMLIMIT_CFG_STORM_RATE_X(x) (((x) & 
GENMASK(6, 3)) >> 3) +#define ANA_STORMLIMIT_CFG_STORM_UNIT BIT(2) +#define ANA_STORMLIMIT_CFG_STORM_MODE(x) ((x) & GENMASK(1, 0)) +#define ANA_STORMLIMIT_CFG_STORM_MODE_M GENMASK(1, 0) + +#define ANA_AUTOAGE_AGE_FAST BIT(21) +#define ANA_AUTOAGE_AGE_PERIOD(x) (((x) << 1) & GENMASK(20, 1)) +#define ANA_AUTOAGE_AGE_PERIOD_M GENMASK(20, 1) +#define ANA_AUTOAGE_AGE_PERIOD_X(x) (((x) & GENMASK(20, 1)) >> 1) +#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0) + +#define ANA_MACTOPTIONS_REDUCED_TABLE BIT(1) +#define ANA_MACTOPTIONS_SHADOW BIT(0) + +#define ANA_AGENCTRL_FID_MASK(x) (((x) << 12) & GENMASK(23, 12)) +#define ANA_AGENCTRL_FID_MASK_M GENMASK(23, 12) +#define ANA_AGENCTRL_FID_MASK_X(x) (((x) & GENMASK(23, 12)) >> 12) +#define ANA_AGENCTRL_IGNORE_DMAC_FLAGS BIT(11) +#define ANA_AGENCTRL_IGNORE_SMAC_FLAGS BIT(10) +#define ANA_AGENCTRL_FLOOD_SPECIAL BIT(9) +#define ANA_AGENCTRL_FLOOD_IGNORE_VLAN BIT(8) +#define ANA_AGENCTRL_MIRROR_CPU BIT(7) +#define ANA_AGENCTRL_LEARN_CPU_COPY BIT(6) +#define ANA_AGENCTRL_LEARN_FWD_KILL BIT(5) +#define ANA_AGENCTRL_LEARN_IGNORE_VLAN BIT(4) +#define ANA_AGENCTRL_CPU_CPU_KILL_ENA BIT(3) +#define ANA_AGENCTRL_GREEN_COUNT_MODE BIT(2) +#define ANA_AGENCTRL_YELLOW_COUNT_MODE BIT(1) +#define ANA_AGENCTRL_RED_COUNT_MODE BIT(0) + +#define ANA_FLOODING_RSZ 0x4 + +#define ANA_FLOODING_FLD_UNICAST(x) (((x) << 12) & GENMASK(17, 12)) +#define ANA_FLOODING_FLD_UNICAST_M GENMASK(17, 12) +#define ANA_FLOODING_FLD_UNICAST_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define ANA_FLOODING_FLD_BROADCAST(x) (((x) << 6) & GENMASK(11, 6)) +#define ANA_FLOODING_FLD_BROADCAST_M GENMASK(11, 6) +#define ANA_FLOODING_FLD_BROADCAST_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define ANA_FLOODING_FLD_MULTICAST(x) ((x) & GENMASK(5, 0)) +#define ANA_FLOODING_FLD_MULTICAST_M GENMASK(5, 0) + +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL(x) (((x) << 18) & GENMASK(23, 18)) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_M GENMASK(23, 18) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA(x) (((x) << 12) & GENMASK(17, 12)) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_M GENMASK(17, 12) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL(x) (((x) << 6) & GENMASK(11, 6)) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_M GENMASK(11, 6) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA(x) ((x) & GENMASK(5, 0)) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_M GENMASK(5, 0) + +#define ANA_SFLOW_CFG_RSZ 0x4 + +#define ANA_SFLOW_CFG_SF_RATE(x) (((x) << 2) & GENMASK(13, 2)) +#define ANA_SFLOW_CFG_SF_RATE_M GENMASK(13, 2) +#define ANA_SFLOW_CFG_SF_RATE_X(x) (((x) & GENMASK(13, 2)) >> 2) +#define ANA_SFLOW_CFG_SF_SAMPLE_RX BIT(1) +#define ANA_SFLOW_CFG_SF_SAMPLE_TX BIT(0) + +#define ANA_PORT_MODE_RSZ 0x4 + +#define ANA_PORT_MODE_REDTAG_PARSE_CFG BIT(3) +#define ANA_PORT_MODE_VLAN_PARSE_CFG(x) (((x) << 1) & GENMASK(2, 1)) +#define ANA_PORT_MODE_VLAN_PARSE_CFG_M GENMASK(2, 1) +#define ANA_PORT_MODE_VLAN_PARSE_CFG_X(x) (((x) & GENMASK(2, 1)) >> 1) +#define ANA_PORT_MODE_L3_PARSE_CFG BIT(0) + +#define ANA_CUT_THRU_CFG_RSZ 0x4 + +#define ANA_PGID_PGID_RSZ 0x4 + +#define ANA_PGID_PGID_PGID(x) ((x) & GENMASK(11, 0)) +#define ANA_PGID_PGID_PGID_M GENMASK(11, 0) +#define ANA_PGID_PGID_CPUQ_DST_PGID(x) (((x) << 27) & GENMASK(29, 27)) +#define ANA_PGID_PGID_CPUQ_DST_PGID_M GENMASK(29, 27) +#define ANA_PGID_PGID_CPUQ_DST_PGID_X(x) (((x) & GENMASK(29, 27)) >> 27) + +#define 
ANA_TABLES_MACHDATA_VID(x) (((x) << 16) & GENMASK(28, 16)) +#define ANA_TABLES_MACHDATA_VID_M GENMASK(28, 16) +#define ANA_TABLES_MACHDATA_VID_X(x) (((x) & GENMASK(28, 16)) >> 16) +#define ANA_TABLES_MACHDATA_MACHDATA(x) ((x) & GENMASK(15, 0)) +#define ANA_TABLES_MACHDATA_MACHDATA_M GENMASK(15, 0) + +#define ANA_TABLES_STREAMDATA_SSID_VALID BIT(16) +#define ANA_TABLES_STREAMDATA_SSID(x) (((x) << 9) & GENMASK(15, 9)) +#define ANA_TABLES_STREAMDATA_SSID_M GENMASK(15, 9) +#define ANA_TABLES_STREAMDATA_SSID_X(x) (((x) & GENMASK(15, 9)) >> 9) +#define ANA_TABLES_STREAMDATA_SFID_VALID BIT(8) +#define ANA_TABLES_STREAMDATA_SFID(x) ((x) & GENMASK(7, 0)) +#define ANA_TABLES_STREAMDATA_SFID_M GENMASK(7, 0) + +#define ANA_TABLES_MACACCESS_MAC_CPU_COPY BIT(15) +#define ANA_TABLES_MACACCESS_SRC_KILL BIT(14) +#define ANA_TABLES_MACACCESS_IGNORE_VLAN BIT(13) +#define ANA_TABLES_MACACCESS_AGED_FLAG BIT(12) +#define ANA_TABLES_MACACCESS_VALID BIT(11) +#define ANA_TABLES_MACACCESS_ENTRYTYPE(x) (((x) << 9) & GENMASK(10, 9)) +#define ANA_TABLES_MACACCESS_ENTRYTYPE_M GENMASK(10, 9) +#define ANA_TABLES_MACACCESS_ENTRYTYPE_X(x) (((x) & GENMASK(10, 9)) >> 9) +#define ANA_TABLES_MACACCESS_DEST_IDX(x) (((x) << 3) & GENMASK(8, 3)) +#define ANA_TABLES_MACACCESS_DEST_IDX_M GENMASK(8, 3) +#define ANA_TABLES_MACACCESS_DEST_IDX_X(x) (((x) & GENMASK(8, 3)) >> 3) +#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x) ((x) & GENMASK(2, 0)) +#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M GENMASK(2, 0) +#define MACACCESS_CMD_IDLE 0 +#define MACACCESS_CMD_LEARN 1 +#define MACACCESS_CMD_FORGET 2 +#define MACACCESS_CMD_AGE 3 +#define MACACCESS_CMD_GET_NEXT 4 +#define MACACCESS_CMD_INIT 5 +#define MACACCESS_CMD_READ 6 +#define MACACCESS_CMD_WRITE 7 + +#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(x) (((x) << 2) & GENMASK(13, 2)) +#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_M GENMASK(13, 2) +#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_X(x) (((x) & GENMASK(13, 2)) >> 2) +#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD(x) ((x) & GENMASK(1, 0)) +#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M GENMASK(1, 0) +#define ANA_TABLES_VLANACCESS_CMD_IDLE 0x0 +#define ANA_TABLES_VLANACCESS_CMD_WRITE 0x2 +#define ANA_TABLES_VLANACCESS_CMD_INIT 0x3 + +#define ANA_TABLES_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17) +#define ANA_TABLES_VLANTIDX_VLAN_FLOOD_DIS BIT(16) +#define ANA_TABLES_VLANTIDX_VLAN_PRIV_VLAN BIT(15) +#define ANA_TABLES_VLANTIDX_VLAN_LEARN_DISABLED BIT(14) +#define ANA_TABLES_VLANTIDX_VLAN_MIRROR BIT(13) +#define ANA_TABLES_VLANTIDX_VLAN_SRC_CHK BIT(12) +#define ANA_TABLES_VLANTIDX_V_INDEX(x) ((x) & GENMASK(11, 0)) +#define ANA_TABLES_VLANTIDX_V_INDEX_M GENMASK(11, 0) + +#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK(x) (((x) << 2) & GENMASK(8, 2)) +#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_M GENMASK(8, 2) +#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_X(x) (((x) & GENMASK(8, 2)) >> 2) +#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD(x) ((x) & GENMASK(1, 0)) +#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD_M GENMASK(1, 0) + +#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI(x) (((x) << 21) & GENMASK(28, 21)) +#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_M GENMASK(28, 21) +#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_X(x) (((x) & GENMASK(28, 21)) >> 21) +#define ANA_TABLES_ISDXTIDX_ISDX_MSTI(x) (((x) << 15) & GENMASK(20, 15)) +#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_M GENMASK(20, 15) +#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_X(x) (((x) & GENMASK(20, 15)) >> 15) +#define ANA_TABLES_ISDXTIDX_ISDX_ES0_KEY_ENA BIT(14) +#define ANA_TABLES_ISDXTIDX_ISDX_FORCE_ENA BIT(10) +#define 
ANA_TABLES_ISDXTIDX_ISDX_INDEX(x) ((x) & GENMASK(7, 0)) +#define ANA_TABLES_ISDXTIDX_ISDX_INDEX_M GENMASK(7, 0) + +#define ANA_TABLES_ENTRYLIM_RSZ 0x4 + +#define ANA_TABLES_ENTRYLIM_ENTRYLIM(x) (((x) << 14) & GENMASK(17, 14)) +#define ANA_TABLES_ENTRYLIM_ENTRYLIM_M GENMASK(17, 14) +#define ANA_TABLES_ENTRYLIM_ENTRYLIM_X(x) (((x) & GENMASK(17, 14)) >> 14) +#define ANA_TABLES_ENTRYLIM_ENTRYSTAT(x) ((x) & GENMASK(13, 0)) +#define ANA_TABLES_ENTRYLIM_ENTRYSTAT_M GENMASK(13, 0) + +#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(x) (((x) << 4) & GENMASK(31, 4)) +#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_M GENMASK(31, 4) +#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(x) (((x) & GENMASK(31, 4)) >> 4) +#define ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA BIT(3) +#define ANA_TABLES_STREAMACCESS_GEN_REC_TYPE BIT(2) +#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(x) ((x) & GENMASK(1, 0)) +#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD_M GENMASK(1, 0) + +#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS(x) (((x) << 30) & GENMASK(31, 30)) +#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_M GENMASK(31, 30) +#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(x) (((x) & GENMASK(31, 30)) >> 30) +#define ANA_TABLES_STREAMTIDX_S_INDEX(x) (((x) << 16) & GENMASK(22, 16)) +#define ANA_TABLES_STREAMTIDX_S_INDEX_M GENMASK(22, 16) +#define ANA_TABLES_STREAMTIDX_S_INDEX_X(x) (((x) & GENMASK(22, 16)) >> 16) +#define ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR BIT(14) +#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(x) (((x) << 8) & GENMASK(13, 8)) +#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_M GENMASK(13, 8) +#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(x) (((x) & GENMASK(13, 8)) >> 8) +#define ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE BIT(7) +#define ANA_TABLES_STREAMTIDX_REDTAG_POP BIT(6) +#define ANA_TABLES_STREAMTIDX_STREAM_SPLIT BIT(5) +#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(x) ((x) & GENMASK(4, 0)) +#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2_M GENMASK(4, 0) + +#define ANA_TABLES_SEQ_MASK_SPLIT_MASK(x) (((x) << 16) & GENMASK(22, 16)) +#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_M GENMASK(22, 16) +#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(x) (((x) & GENMASK(22, 16)) >> 16) +#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(x) ((x) & GENMASK(6, 0)) +#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK_M GENMASK(6, 0) + +#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK(x) (((x) << 1) & GENMASK(7, 1)) +#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_M GENMASK(7, 1) +#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_X(x) (((x) & GENMASK(7, 1)) >> 1) +#define ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA BIT(0) + +#define ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA BIT(22) +#define ANA_TABLES_SFIDACCESS_IGR_PRIO(x) (((x) << 19) & GENMASK(21, 19)) +#define ANA_TABLES_SFIDACCESS_IGR_PRIO_M GENMASK(21, 19) +#define ANA_TABLES_SFIDACCESS_IGR_PRIO_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define ANA_TABLES_SFIDACCESS_FORCE_BLOCK BIT(18) +#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(x) (((x) << 2) & GENMASK(17, 2)) +#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_M GENMASK(17, 2) +#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(x) (((x) & GENMASK(17, 2)) >> 2) +#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0)) +#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0) + +#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26) +#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18)) +#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18) +#define ANA_TABLES_SFIDTIDX_SGID_X(x) (((x) & GENMASK(25, 18)) >> 18) +#define ANA_TABLES_SFIDTIDX_POL_ENA BIT(17) +#define 
ANA_TABLES_SFIDTIDX_POL_IDX(x) (((x) << 8) & GENMASK(16, 8)) +#define ANA_TABLES_SFIDTIDX_POL_IDX_M GENMASK(16, 8) +#define ANA_TABLES_SFIDTIDX_POL_IDX_X(x) (((x) & GENMASK(16, 8)) >> 8) +#define ANA_TABLES_SFIDTIDX_SFID_INDEX(x) ((x) & GENMASK(7, 0)) +#define ANA_TABLES_SFIDTIDX_SFID_INDEX_M GENMASK(7, 0) + +#define ANA_MSTI_STATE_RSZ 0x4 + +#define ANA_OAM_UPM_LM_CNT_RSZ 0x4 + +#define ANA_SG_ACCESS_CTRL_SGID(x) ((x) & GENMASK(7, 0)) +#define ANA_SG_ACCESS_CTRL_SGID_M GENMASK(7, 0) +#define ANA_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28) + +#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0) +#define ANA_SG_CONFIG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(18, 16)) +#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16) +#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20) +#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24)) +#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24) +#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24) +#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28) + +#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4 + +#define ANA_SG_GCL_GS_CONFIG_IPS(x) ((x) & GENMASK(3, 0)) +#define ANA_SG_GCL_GS_CONFIG_IPS_M GENMASK(3, 0) +#define ANA_SG_GCL_GS_CONFIG_GATE_STATE BIT(4) + +#define ANA_SG_GCL_TI_CONFIG_RSZ 0x4 + +#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0) +#define ANA_SG_STATUS_REG_3_GATE_STATE BIT(16) +#define ANA_SG_STATUS_REG_3_IPS(x) (((x) << 20) & GENMASK(23, 20)) +#define ANA_SG_STATUS_REG_3_IPS_M GENMASK(23, 20) +#define ANA_SG_STATUS_REG_3_IPS_X(x) (((x) & GENMASK(23, 20)) >> 20) +#define ANA_SG_STATUS_REG_3_CONFIG_PENDING BIT(24) + +#define ANA_PORT_VLAN_CFG_GSZ 0x100 + +#define ANA_PORT_VLAN_CFG_VLAN_VID_AS_ISDX BIT(21) +#define ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA BIT(20) +#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT(x) (((x) << 18) & GENMASK(19, 18)) +#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M GENMASK(19, 18) +#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_X(x) (((x) & GENMASK(19, 18)) >> 18) +#define ANA_PORT_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17) +#define ANA_PORT_VLAN_CFG_VLAN_TAG_TYPE BIT(16) +#define ANA_PORT_VLAN_CFG_VLAN_DEI BIT(15) +#define ANA_PORT_VLAN_CFG_VLAN_PCP(x) (((x) << 12) & GENMASK(14, 12)) +#define ANA_PORT_VLAN_CFG_VLAN_PCP_M GENMASK(14, 12) +#define ANA_PORT_VLAN_CFG_VLAN_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12) +#define ANA_PORT_VLAN_CFG_VLAN_VID(x) ((x) & GENMASK(11, 0)) +#define ANA_PORT_VLAN_CFG_VLAN_VID_M GENMASK(11, 0) + +#define ANA_PORT_DROP_CFG_GSZ 0x100 + +#define ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA BIT(6) +#define ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA BIT(5) +#define ANA_PORT_DROP_CFG_DROP_C_TAGGED_ENA BIT(4) +#define ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3) +#define ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2) +#define ANA_PORT_DROP_CFG_DROP_NULL_MAC_ENA BIT(1) +#define ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA BIT(0) + +#define ANA_PORT_QOS_CFG_GSZ 0x100 + +#define ANA_PORT_QOS_CFG_DP_DEFAULT_VAL BIT(8) +#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(x) (((x) << 5) & GENMASK(7, 5)) +#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M GENMASK(7, 5) +#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define ANA_PORT_QOS_CFG_QOS_DSCP_ENA BIT(4) +#define ANA_PORT_QOS_CFG_QOS_PCP_ENA BIT(3) +#define ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA BIT(2) +#define 
ANA_PORT_QOS_CFG_DSCP_REWR_CFG(x) ((x) & GENMASK(1, 0)) +#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG_M GENMASK(1, 0) + +#define ANA_PORT_VCAP_CFG_GSZ 0x100 + +#define ANA_PORT_VCAP_CFG_S1_ENA BIT(14) +#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA(x) (((x) << 11) & GENMASK(13, 11)) +#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_M GENMASK(13, 11) +#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_X(x) (((x) & GENMASK(13, 11)) >> 11) +#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA(x) (((x) << 8) & GENMASK(10, 8)) +#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_M GENMASK(10, 8) +#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_X(x) (((x) & GENMASK(10, 8)) >> 8) +#define ANA_PORT_VCAP_CFG_PAG_VAL(x) ((x) & GENMASK(7, 0)) +#define ANA_PORT_VCAP_CFG_PAG_VAL_M GENMASK(7, 0) + +#define ANA_PORT_VCAP_S1_KEY_CFG_GSZ 0x100 +#define ANA_PORT_VCAP_S1_KEY_CFG_RSZ 0x4 + +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG(x) (((x) << 4) & GENMASK(6, 4)) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_M GENMASK(6, 4) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_X(x) (((x) & GENMASK(6, 4)) >> 4) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG(x) (((x) << 2) & GENMASK(3, 2)) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_M GENMASK(3, 2) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG(x) ((x) & GENMASK(1, 0)) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG_M GENMASK(1, 0) + +#define ANA_PORT_VCAP_S2_CFG_GSZ 0x100 + +#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA(x) (((x) << 17) & GENMASK(18, 17)) +#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_M GENMASK(18, 17) +#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_X(x) (((x) & GENMASK(18, 17)) >> 17) +#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA(x) (((x) << 15) & GENMASK(16, 15)) +#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_M GENMASK(16, 15) +#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_X(x) (((x) & GENMASK(16, 15)) >> 15) +#define ANA_PORT_VCAP_S2_CFG_S2_ENA BIT(14) +#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(x) (((x) << 12) & GENMASK(13, 12)) +#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M GENMASK(13, 12) +#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_X(x) (((x) & GENMASK(13, 12)) >> 12) +#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(x) (((x) << 10) & GENMASK(11, 10)) +#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M GENMASK(11, 10) +#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_X(x) (((x) & GENMASK(11, 10)) >> 10) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(x) (((x) << 8) & GENMASK(9, 8)) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M GENMASK(9, 8) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(x) (((x) << 6) & GENMASK(7, 6)) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M GENMASK(7, 6) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_X(x) (((x) & GENMASK(7, 6)) >> 6) +#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(x) (((x) << 2) & GENMASK(5, 2)) +#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_M GENMASK(5, 2) +#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_X(x) (((x) & GENMASK(5, 2)) >> 2) +#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(x) ((x) & GENMASK(1, 0)) +#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M GENMASK(1, 0) + +#define ANA_PORT_PCP_DEI_MAP_GSZ 0x100 +#define ANA_PORT_PCP_DEI_MAP_RSZ 0x4 + +#define ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL BIT(3) +#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(x) ((x) & GENMASK(2, 0)) +#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M GENMASK(2, 0) + +#define ANA_PORT_CPU_FWD_CFG_GSZ 0x100 + +#define 
ANA_PORT_CPU_FWD_CFG_CPU_VRAP_REDIR_ENA BIT(7) +#define ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA BIT(6) +#define ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA BIT(5) +#define ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA BIT(4) +#define ANA_PORT_CPU_FWD_CFG_CPU_SRC_COPY_ENA BIT(3) +#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_DROP_ENA BIT(2) +#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_REDIR_ENA BIT(1) +#define ANA_PORT_CPU_FWD_CFG_CPU_OAM_ENA BIT(0) + +#define ANA_PORT_CPU_FWD_BPDU_CFG_GSZ 0x100 + +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16)) +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_M GENMASK(31, 16) +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(x) ((x) & GENMASK(15, 0)) +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_M GENMASK(15, 0) + +#define ANA_PORT_CPU_FWD_GARP_CFG_GSZ 0x100 + +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16)) +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_M GENMASK(31, 16) +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA(x) ((x) & GENMASK(15, 0)) +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA_M GENMASK(15, 0) + +#define ANA_PORT_CPU_FWD_CCM_CFG_GSZ 0x100 + +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16)) +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_M GENMASK(31, 16) +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA(x) ((x) & GENMASK(15, 0)) +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA_M GENMASK(15, 0) + +#define ANA_PORT_PORT_CFG_GSZ 0x100 + +#define ANA_PORT_PORT_CFG_SRC_MIRROR_ENA BIT(15) +#define ANA_PORT_PORT_CFG_LIMIT_DROP BIT(14) +#define ANA_PORT_PORT_CFG_LIMIT_CPU BIT(13) +#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(12) +#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(11) +#define ANA_PORT_PORT_CFG_LEARNDROP BIT(10) +#define ANA_PORT_PORT_CFG_LEARNCPU BIT(9) +#define ANA_PORT_PORT_CFG_LEARNAUTO BIT(8) +#define ANA_PORT_PORT_CFG_LEARN_ENA BIT(7) +#define ANA_PORT_PORT_CFG_RECV_ENA BIT(6) +#define ANA_PORT_PORT_CFG_PORTID_VAL(x) (((x) << 2) & GENMASK(5, 2)) +#define ANA_PORT_PORT_CFG_PORTID_VAL_M GENMASK(5, 2) +#define ANA_PORT_PORT_CFG_PORTID_VAL_X(x) (((x) & GENMASK(5, 2)) >> 2) +#define ANA_PORT_PORT_CFG_USE_B_DOM_TBL BIT(1) +#define ANA_PORT_PORT_CFG_LSR_MODE BIT(0) + +#define ANA_PORT_POL_CFG_GSZ 0x100 + +#define ANA_PORT_POL_CFG_POL_CPU_REDIR_8021 BIT(19) +#define ANA_PORT_POL_CFG_POL_CPU_REDIR_IP BIT(18) +#define ANA_PORT_POL_CFG_PORT_POL_ENA BIT(17) +#define ANA_PORT_POL_CFG_QUEUE_POL_ENA(x) (((x) << 9) & GENMASK(16, 9)) +#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_M GENMASK(16, 9) +#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_X(x) (((x) & GENMASK(16, 9)) >> 9) +#define ANA_PORT_POL_CFG_POL_ORDER(x) ((x) & GENMASK(8, 0)) +#define ANA_PORT_POL_CFG_POL_ORDER_M GENMASK(8, 0) + +#define ANA_PORT_PTP_CFG_GSZ 0x100 + +#define ANA_PORT_PTP_CFG_PTP_BACKPLANE_MODE BIT(0) + +#define ANA_PORT_PTP_DLY1_CFG_GSZ 0x100 + +#define ANA_PORT_PTP_DLY2_CFG_GSZ 0x100 + +#define ANA_PORT_SFID_CFG_GSZ 0x100 +#define ANA_PORT_SFID_CFG_RSZ 0x4 + +#define ANA_PORT_SFID_CFG_SFID_VALID BIT(8) +#define ANA_PORT_SFID_CFG_SFID(x) ((x) & GENMASK(7, 0)) +#define ANA_PORT_SFID_CFG_SFID_M GENMASK(7, 0) + +#define ANA_PFC_PFC_CFG_GSZ 0x40 + +#define ANA_PFC_PFC_CFG_RX_PFC_ENA(x) (((x) << 2) & GENMASK(9, 2)) +#define 
ANA_PFC_PFC_CFG_RX_PFC_ENA_M GENMASK(9, 2) +#define ANA_PFC_PFC_CFG_RX_PFC_ENA_X(x) (((x) & GENMASK(9, 2)) >> 2) +#define ANA_PFC_PFC_CFG_FC_LINK_SPEED(x) ((x) & GENMASK(1, 0)) +#define ANA_PFC_PFC_CFG_FC_LINK_SPEED_M GENMASK(1, 0) + +#define ANA_PFC_PFC_TIMER_GSZ 0x40 +#define ANA_PFC_PFC_TIMER_RSZ 0x4 + +#define ANA_IPT_OAM_MEP_CFG_GSZ 0x8 + +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P(x) (((x) << 6) & GENMASK(10, 6)) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_M GENMASK(10, 6) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_X(x) (((x) & GENMASK(10, 6)) >> 6) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX(x) (((x) << 1) & GENMASK(5, 1)) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_M GENMASK(5, 1) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_X(x) (((x) & GENMASK(5, 1)) >> 1) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_ENA BIT(0) + +#define ANA_IPT_IPT_GSZ 0x8 + +#define ANA_IPT_IPT_IPT_CFG(x) (((x) << 15) & GENMASK(16, 15)) +#define ANA_IPT_IPT_IPT_CFG_M GENMASK(16, 15) +#define ANA_IPT_IPT_IPT_CFG_X(x) (((x) & GENMASK(16, 15)) >> 15) +#define ANA_IPT_IPT_ISDX_P(x) (((x) << 7) & GENMASK(14, 7)) +#define ANA_IPT_IPT_ISDX_P_M GENMASK(14, 7) +#define ANA_IPT_IPT_ISDX_P_X(x) (((x) & GENMASK(14, 7)) >> 7) +#define ANA_IPT_IPT_PPT_IDX(x) ((x) & GENMASK(6, 0)) +#define ANA_IPT_IPT_PPT_IDX_M GENMASK(6, 0) + +#define ANA_PPT_PPT_RSZ 0x4 + +#define ANA_FID_MAP_FID_MAP_RSZ 0x4 + +#define ANA_FID_MAP_FID_MAP_FID_C_VAL(x) (((x) << 6) & GENMASK(11, 6)) +#define ANA_FID_MAP_FID_MAP_FID_C_VAL_M GENMASK(11, 6) +#define ANA_FID_MAP_FID_MAP_FID_C_VAL_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define ANA_FID_MAP_FID_MAP_FID_B_VAL(x) ((x) & GENMASK(5, 0)) +#define ANA_FID_MAP_FID_MAP_FID_B_VAL_M GENMASK(5, 0) + +#define ANA_AGGR_CFG_AC_RND_ENA BIT(7) +#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(6) +#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(5) +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(4) +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(3) +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(2) +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(1) +#define ANA_AGGR_CFG_AC_ISDX_ENA BIT(0) + +#define ANA_CPUQ_CFG_CPUQ_MLD(x) (((x) << 27) & GENMASK(29, 27)) +#define ANA_CPUQ_CFG_CPUQ_MLD_M GENMASK(29, 27) +#define ANA_CPUQ_CFG_CPUQ_MLD_X(x) (((x) & GENMASK(29, 27)) >> 27) +#define ANA_CPUQ_CFG_CPUQ_IGMP(x) (((x) << 24) & GENMASK(26, 24)) +#define ANA_CPUQ_CFG_CPUQ_IGMP_M GENMASK(26, 24) +#define ANA_CPUQ_CFG_CPUQ_IGMP_X(x) (((x) & GENMASK(26, 24)) >> 24) +#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(x) (((x) << 21) & GENMASK(23, 21)) +#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_M GENMASK(23, 21) +#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_X(x) (((x) & GENMASK(23, 21)) >> 21) +#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(x) (((x) << 18) & GENMASK(20, 18)) +#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_M GENMASK(20, 18) +#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_X(x) (((x) & GENMASK(20, 18)) >> 18) +#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(x) (((x) << 15) & GENMASK(17, 15)) +#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_M GENMASK(17, 15) +#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_X(x) (((x) & GENMASK(17, 15)) >> 15) +#define ANA_CPUQ_CFG_CPUQ_SRC_COPY(x) (((x) << 12) & GENMASK(14, 12)) +#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_M GENMASK(14, 12) +#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_X(x) (((x) & GENMASK(14, 12)) >> 12) +#define ANA_CPUQ_CFG_CPUQ_MAC_COPY(x) (((x) << 9) & GENMASK(11, 9)) +#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_M GENMASK(11, 9) +#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_X(x) (((x) & GENMASK(11, 9)) >> 9) +#define ANA_CPUQ_CFG_CPUQ_LRN(x) (((x) << 6) & GENMASK(8, 6)) +#define ANA_CPUQ_CFG_CPUQ_LRN_M GENMASK(8, 6) +#define ANA_CPUQ_CFG_CPUQ_LRN_X(x) (((x) & 
GENMASK(8, 6)) >> 6) +#define ANA_CPUQ_CFG_CPUQ_MIRROR(x) (((x) << 3) & GENMASK(5, 3)) +#define ANA_CPUQ_CFG_CPUQ_MIRROR_M GENMASK(5, 3) +#define ANA_CPUQ_CFG_CPUQ_MIRROR_X(x) (((x) & GENMASK(5, 3)) >> 3) +#define ANA_CPUQ_CFG_CPUQ_SFLOW(x) ((x) & GENMASK(2, 0)) +#define ANA_CPUQ_CFG_CPUQ_SFLOW_M GENMASK(2, 0) + +#define ANA_CPUQ_8021_CFG_RSZ 0x4 + +#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(x) (((x) << 6) & GENMASK(8, 6)) +#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_M GENMASK(8, 6) +#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(x) (((x) << 3) & GENMASK(5, 3)) +#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_M GENMASK(5, 3) +#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_X(x) (((x) & GENMASK(5, 3)) >> 3) +#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL(x) ((x) & GENMASK(2, 0)) +#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL_M GENMASK(2, 0) + +#define ANA_DSCP_CFG_RSZ 0x4 + +#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11) +#define ANA_DSCP_CFG_QOS_DSCP_VAL(x) (((x) << 8) & GENMASK(10, 8)) +#define ANA_DSCP_CFG_QOS_DSCP_VAL_M GENMASK(10, 8) +#define ANA_DSCP_CFG_QOS_DSCP_VAL_X(x) (((x) & GENMASK(10, 8)) >> 8) +#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(x) (((x) << 2) & GENMASK(7, 2)) +#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_M GENMASK(7, 2) +#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(x) (((x) & GENMASK(7, 2)) >> 2) +#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1) +#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0) + +#define ANA_DSCP_REWR_CFG_RSZ 0x4 + +#define ANA_VCAP_RNG_TYPE_CFG_RSZ 0x4 + +#define ANA_VCAP_RNG_VAL_CFG_RSZ 0x4 + +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL(x) (((x) << 16) & GENMASK(31, 16)) +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_M GENMASK(31, 16) +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL(x) ((x) & GENMASK(15, 0)) +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL_M GENMASK(15, 0) + +#define ANA_VRAP_CFG_VRAP_VLAN_AWARE_ENA BIT(12) +#define ANA_VRAP_CFG_VRAP_VID(x) ((x) & GENMASK(11, 0)) +#define ANA_VRAP_CFG_VRAP_VID_M GENMASK(11, 0) + +#define ANA_DISCARD_CFG_DROP_TAGGING_ISDX0 BIT(3) +#define ANA_DISCARD_CFG_DROP_CTRLPROT_ISDX0 BIT(2) +#define ANA_DISCARD_CFG_DROP_TAGGING_S2_ENA BIT(1) +#define ANA_DISCARD_CFG_DROP_CTRLPROT_S2_ENA BIT(0) + +#define ANA_FID_CFG_VID_MC_ENA BIT(0) + +#define ANA_POL_PIR_CFG_GSZ 0x20 + +#define ANA_POL_PIR_CFG_PIR_RATE(x) (((x) << 6) & GENMASK(20, 6)) +#define ANA_POL_PIR_CFG_PIR_RATE_M GENMASK(20, 6) +#define ANA_POL_PIR_CFG_PIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6) +#define ANA_POL_PIR_CFG_PIR_BURST(x) ((x) & GENMASK(5, 0)) +#define ANA_POL_PIR_CFG_PIR_BURST_M GENMASK(5, 0) + +#define ANA_POL_CIR_CFG_GSZ 0x20 + +#define ANA_POL_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6)) +#define ANA_POL_CIR_CFG_CIR_RATE_M GENMASK(20, 6) +#define ANA_POL_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6) +#define ANA_POL_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0)) +#define ANA_POL_CIR_CFG_CIR_BURST_M GENMASK(5, 0) + +#define ANA_POL_MODE_CFG_GSZ 0x20 + +#define ANA_POL_MODE_CFG_IPG_SIZE(x) (((x) << 5) & GENMASK(9, 5)) +#define ANA_POL_MODE_CFG_IPG_SIZE_M GENMASK(9, 5) +#define ANA_POL_MODE_CFG_IPG_SIZE_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define ANA_POL_MODE_CFG_FRM_MODE(x) (((x) << 3) & GENMASK(4, 3)) +#define ANA_POL_MODE_CFG_FRM_MODE_M GENMASK(4, 3) +#define ANA_POL_MODE_CFG_FRM_MODE_X(x) (((x) & GENMASK(4, 3)) >> 3) +#define ANA_POL_MODE_CFG_DLB_COUPLED BIT(2) +#define ANA_POL_MODE_CFG_CIR_ENA BIT(1) +#define ANA_POL_MODE_CFG_OVERSHOOT_ENA BIT(0) 
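
Every multi-bit field in this header follows the same three-macro convention: FIELD(x) shifts a value into its bit position and masks it, FIELD_M is the mask of the field in place, and FIELD_X(x) pulls the value back out of a register word. A minimal user-space sketch of that round trip, reusing the ANA_AUTOAGE_AGE_PERIOD definitions from earlier in this header; the simplified 32-bit GENMASK() stand-in and the example value 150 are assumptions made purely for illustration:

/* Illustration of the FIELD(x) / FIELD_M / FIELD_X(x) pattern used above. */
#include <assert.h>
#include <stdint.h>

/* User-space stand-in for the kernel's GENMASK(), 32-bit only */
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

/* Same definitions as in ocelot_ana.h */
#define ANA_AUTOAGE_AGE_PERIOD(x)	(((x) << 1) & GENMASK(20, 1))
#define ANA_AUTOAGE_AGE_PERIOD_M	GENMASK(20, 1)
#define ANA_AUTOAGE_AGE_PERIOD_X(x)	(((x) & GENMASK(20, 1)) >> 1)

int main(void)
{
	uint32_t period = 150;				/* arbitrary example value */
	uint32_t reg = ANA_AUTOAGE_AGE_PERIOD(period);	/* pack into bits 20:1 */

	assert((reg & ~ANA_AUTOAGE_AGE_PERIOD_M) == 0);	/* only field bits are set */
	assert(ANA_AUTOAGE_AGE_PERIOD_X(reg) == period);/* _X() undoes the pack */
	return 0;
}
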
+ +#define ANA_POL_PIR_STATE_GSZ 0x20 + +#define ANA_POL_CIR_STATE_GSZ 0x20 + +#define ANA_POL_STATE_GSZ 0x20 + +#define ANA_POL_FLOWC_RSZ 0x4 + +#define ANA_POL_FLOWC_POL_FLOWC BIT(0) + +#define ANA_POL_HYST_POL_FC_HYST(x) (((x) << 4) & GENMASK(9, 4)) +#define ANA_POL_HYST_POL_FC_HYST_M GENMASK(9, 4) +#define ANA_POL_HYST_POL_FC_HYST_X(x) (((x) & GENMASK(9, 4)) >> 4) +#define ANA_POL_HYST_POL_STOP_HYST(x) ((x) & GENMASK(3, 0)) +#define ANA_POL_HYST_POL_STOP_HYST_M GENMASK(3, 0) + +#define ANA_POL_MISC_CFG_POL_CLOSE_ALL BIT(1) +#define ANA_POL_MISC_CFG_POL_LEAK_DIS BIT(0) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c new file mode 100644 index 000000000000..18df7d934e81 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include +#include +#include +#include +#include +#include + +#include "ocelot.h" + +static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info) +{ + int i; + u8 llen, wlen; + + /* The IFH is in network order, switch to CPU order */ + for (i = 0; i < IFH_LEN; i++) + ifh[i] = ntohl((__force __be32)ifh[i]); + + wlen = (ifh[1] >> 7) & 0xff; + llen = (ifh[1] >> 15) & 0x3f; + info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80; + + info->port = (ifh[2] & GENMASK(14, 11)) >> 11; + + info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20; + info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16; + info->vid = ifh[3] & GENMASK(11, 0); + + return 0; +} + +static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh, + u32 *rval) +{ + u32 val; + u32 bytes_valid; + + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + if (val == XTR_NOT_READY) { + if (ifh) + return -EIO; + + do { + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + } while (val == XTR_NOT_READY); + } + + switch (val) { + case XTR_ABORT: + return -EIO; + case XTR_EOF_0: + case XTR_EOF_1: + case XTR_EOF_2: + case XTR_EOF_3: + case XTR_PRUNED: + bytes_valid = XTR_VALID_BYTES(val); + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + if (val == XTR_ESCAPE) + *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + else + *rval = val; + + return bytes_valid; + case XTR_ESCAPE: + *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + + return 4; + default: + *rval = val; + + return 4; + } +} + +static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) +{ + struct ocelot *ocelot = arg; + int i = 0, grp = 0; + int err = 0; + + if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))) + return IRQ_NONE; + + do { + struct sk_buff *skb; + struct net_device *dev; + u32 *buf; + int sz, len; + u32 ifh[4]; + u32 val; + struct frame_info info; + + for (i = 0; i < IFH_LEN; i++) { + err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]); + if (err != 4) + break; + } + + if (err != 4) + break; + + ocelot_parse_ifh(ifh, &info); + + dev = ocelot->ports[info.port]->dev; + + skb = netdev_alloc_skb(dev, info.len); + + if (unlikely(!skb)) { + netdev_err(dev, "Unable to allocate sk_buff\n"); + err = -ENOMEM; + break; + } + buf = (u32 *)skb_put(skb, info.len); + + len = 0; + do { + sz = ocelot_rx_frame_word(ocelot, grp, false, &val); + *buf++ = val; + len += sz; + } while ((sz == 4) && (len < info.len)); + + if (sz < 0) { + err = sz; + break; + } + + /* Everything we see on an interface that is in the HW bridge + * has already been forwarded. 
+ */ + if (ocelot->bridge_mask & BIT(info.port)) + skb->offload_fwd_mark = 1; + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; + } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)); + + if (err) + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) + ocelot_read_rix(ocelot, QS_XTR_RD, grp); + + return IRQ_HANDLED; +} + +static const struct of_device_id mscc_ocelot_match[] = { + { .compatible = "mscc,vsc7514-switch" }, + { } +}; +MODULE_DEVICE_TABLE(of, mscc_ocelot_match); + +static int mscc_ocelot_probe(struct platform_device *pdev) +{ + int err, irq; + unsigned int i; + struct device_node *np = pdev->dev.of_node; + struct device_node *ports, *portnp; + struct ocelot *ocelot; + u32 val; + + struct { + enum ocelot_target id; + char *name; + } res[] = { + { SYS, "sys" }, + { REW, "rew" }, + { QSYS, "qsys" }, + { ANA, "ana" }, + { QS, "qs" }, + { HSIO, "hsio" }, + }; + + if (!np && !pdev->dev.platform_data) + return -ENODEV; + + ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL); + if (!ocelot) + return -ENOMEM; + + platform_set_drvdata(pdev, ocelot); + ocelot->dev = &pdev->dev; + + for (i = 0; i < ARRAY_SIZE(res); i++) { + struct regmap *target; + + target = ocelot_io_platform_init(ocelot, pdev, res[i].name); + if (IS_ERR(target)) + return PTR_ERR(target); + + ocelot->targets[res[i].id] = target; + } + + err = ocelot_chip_init(ocelot); + if (err) + return err; + + irq = platform_get_irq_byname(pdev, "xtr"); + if (irq < 0) + return -ENODEV; + + err = devm_request_threaded_irq(&pdev->dev, irq, NULL, + ocelot_xtr_irq_handler, IRQF_ONESHOT, + "frame extraction", ocelot); + if (err) + return err; + + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1); + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); + + do { + msleep(1); + regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], + &val); + } while (val); + + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1); + + ocelot->num_cpu_ports = 1; /* 1 port on the switch, two groups */ + + ports = of_get_child_by_name(np, "ethernet-ports"); + if (!ports) { + dev_err(&pdev->dev, "no ethernet-ports child node found\n"); + return -ENODEV; + } + + ocelot->num_phys_ports = of_get_child_count(ports); + + ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports, + sizeof(struct ocelot_port *), GFP_KERNEL); + + INIT_LIST_HEAD(&ocelot->multicast); + ocelot_init(ocelot); + + ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE | + HSIO_HW_CFG_DEV1G_6_MODE | + HSIO_HW_CFG_DEV1G_9_MODE, + HSIO_HW_CFG_DEV1G_4_MODE | + HSIO_HW_CFG_DEV1G_6_MODE | + HSIO_HW_CFG_DEV1G_9_MODE, + HSIO_HW_CFG); + + for_each_available_child_of_node(ports, portnp) { + struct device_node *phy_node; + struct phy_device *phy; + struct resource *res; + void __iomem *regs; + char res_name[8]; + u32 port; + + if (of_property_read_u32(portnp, "reg", &port)) + continue; + + snprintf(res_name, sizeof(res_name), "port%d", port); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + res_name); + regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(regs)) + continue; + + phy_node = of_parse_phandle(portnp, "phy-handle", 0); + if (!phy_node) + continue; + + phy = of_phy_find_device(phy_node); + if (!phy) + continue; + + err = ocelot_probe_port(ocelot, port, regs, phy); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + } + + 
register_netdevice_notifier(&ocelot_netdevice_nb); + + dev_info(&pdev->dev, "Ocelot switch probed\n"); + + return 0; + +err_probe_ports: + return err; +} + +static int mscc_ocelot_remove(struct platform_device *pdev) +{ + struct ocelot *ocelot = platform_get_drvdata(pdev); + + ocelot_deinit(ocelot); + unregister_netdevice_notifier(&ocelot_netdevice_nb); + + return 0; +} + +static struct platform_driver mscc_ocelot_driver = { + .probe = mscc_ocelot_probe, + .remove = mscc_ocelot_remove, + .driver = { + .name = "ocelot-switch", + .of_match_table = mscc_ocelot_match, + }, +}; + +module_platform_driver(mscc_ocelot_driver); + +MODULE_DESCRIPTION("Microsemi Ocelot switch driver"); +MODULE_AUTHOR("Alexandre Belloni "); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/mscc/ocelot_dev.h b/drivers/net/ethernet/mscc/ocelot_dev.h new file mode 100644 index 000000000000..0a50d53bbd3f --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_dev.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_DEV_H_ +#define _MSCC_OCELOT_DEV_H_ + +#define DEV_CLOCK_CFG 0x0 + +#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7) +#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6) +#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5) +#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4) +#define DEV_CLOCK_CFG_PORT_RST BIT(3) +#define DEV_CLOCK_CFG_PHY_RST BIT(2) +#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0)) +#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0) + +#define DEV_PORT_MISC 0x4 + +#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4) +#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3) +#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2) +#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1) +#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0) + +#define DEV_EVENTS 0x8 + +#define DEV_EEE_CFG 0xc + +#define DEV_EEE_CFG_EEE_ENA BIT(22) +#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15)) +#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15) +#define DEV_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15) +#define DEV_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8)) +#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8) +#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8) +#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1)) +#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1) +#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1) +#define DEV_EEE_CFG_PORT_LPI BIT(0) + +#define DEV_RX_PATH_DELAY 0x10 + +#define DEV_TX_PATH_DELAY 0x14 + +#define DEV_PTP_PREDICT_CFG 0x18 + +#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4)) +#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4) +#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4) +#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0)) +#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0) + +#define DEV_MAC_ENA_CFG 0x1c + +#define DEV_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV_MAC_ENA_CFG_TX_ENA BIT(0) + +#define DEV_MAC_MODE_CFG 0x20 + +#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0) + +#define DEV_MAC_MAXLEN_CFG 0x24 + +#define DEV_MAC_TAGS_CFG 0x28 + +#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16)) +#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16) +#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 
16) +#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2) +#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) + +#define DEV_MAC_ADV_CHK_CFG 0x2c + +#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) + +#define DEV_MAC_IFG_CFG 0x30 + +#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) +#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16) +#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8)) +#define DEV_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8) +#define DEV_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8) +#define DEV_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4)) +#define DEV_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4) +#define DEV_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0)) +#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0) + +#define DEV_MAC_HDX_CFG 0x34 + +#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) +#define DEV_MAC_HDX_CFG_OB_ENA BIT(25) +#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24) +#define DEV_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16)) +#define DEV_MAC_HDX_CFG_SEED_M GENMASK(23, 16) +#define DEV_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8) +#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0)) +#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0) + +#define DEV_MAC_DBG_CFG 0x38 + +#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4) +#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0) + +#define DEV_MAC_FC_MAC_LOW_CFG 0x3c + +#define DEV_MAC_FC_MAC_HIGH_CFG 0x40 + +#define DEV_MAC_STICKY 0x44 + +#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9) +#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8) +#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7) +#define DEV_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6) +#define DEV_MAC_STICKY_RX_JUNK_STICKY BIT(5) +#define DEV_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4) +#define DEV_MAC_STICKY_TX_JAM_STICKY BIT(3) +#define DEV_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2) +#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1) +#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0) + +#define PCS1G_CFG 0x48 + +#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4) +#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1) +#define PCS1G_CFG_PCS_ENA BIT(0) + +#define PCS1G_MODE_CFG 0x4c + +#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4) +#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0) + +#define PCS1G_SD_CFG 0x50 + +#define PCS1G_SD_CFG_SD_SEL BIT(8) +#define PCS1G_SD_CFG_SD_POL BIT(4) +#define PCS1G_SD_CFG_SD_ENA BIT(0) + +#define PCS1G_ANEG_CFG 0x54 + +#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16)) +#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16) +#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8) +#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1) +#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0) + +#define PCS1G_ANEG_NP_CFG 0x58 + +#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16)) +#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16) +#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0) + +#define PCS1G_LB_CFG 0x5c + +#define PCS1G_LB_CFG_RA_ENA BIT(4) +#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1) +#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0) + +#define PCS1G_DBG_CFG 0x60 + +#define PCS1G_DBG_CFG_UDLT BIT(0) + +#define PCS1G_CDET_CFG 0x64 + +#define PCS1G_CDET_CFG_CDET_ENA BIT(0) + +#define PCS1G_ANEG_STATUS 0x68 + +#define 
PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16)) +#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16) +#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define PCS1G_ANEG_STATUS_PR BIT(4) +#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3) +#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0) + +#define PCS1G_ANEG_NP_STATUS 0x6c + +#define PCS1G_LINK_STATUS 0x70 + +#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12)) +#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12) +#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12) +#define PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8) +#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4) +#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0) + +#define PCS1G_LINK_DOWN_CNT 0x74 + +#define PCS1G_STICKY 0x78 + +#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4) +#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0) + +#define PCS1G_DEBUG_STATUS 0x7c + +#define PCS1G_LPI_CFG 0x80 + +#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20) +#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17) +#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16) +#define PCS1G_LPI_CFG_LPI_RX_WTIM(x) (((x) << 4) & GENMASK(5, 4)) +#define PCS1G_LPI_CFG_LPI_RX_WTIM_M GENMASK(5, 4) +#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0) + +#define PCS1G_LPI_WAKE_ERROR_CNT 0x84 + +#define PCS1G_LPI_STATUS 0x88 + +#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16) +#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12) +#define PCS1G_LPI_STATUS_RX_QUIET BIT(9) +#define PCS1G_LPI_STATUS_RX_LPI_MODE BIT(8) +#define PCS1G_LPI_STATUS_TX_LPI_EVENT_STICKY BIT(4) +#define PCS1G_LPI_STATUS_TX_QUIET BIT(1) +#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0) + +#define PCS1G_TSTPAT_MODE_CFG 0x8c + +#define PCS1G_TSTPAT_STATUS 0x90 + +#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8)) +#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8) +#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4) +#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0) + +#define DEV_PCS_FX100_CFG 0x94 + +#define DEV_PCS_FX100_CFG_SD_SEL BIT(26) +#define DEV_PCS_FX100_CFG_SD_POL BIT(25) +#define DEV_PCS_FX100_CFG_SD_ENA BIT(24) +#define DEV_PCS_FX100_CFG_LOOPBACK_ENA BIT(20) +#define DEV_PCS_FX100_CFG_SWAP_MII_ENA BIT(16) +#define DEV_PCS_FX100_CFG_RXBITSEL(x) (((x) << 12) & GENMASK(15, 12)) +#define DEV_PCS_FX100_CFG_RXBITSEL_M GENMASK(15, 12) +#define DEV_PCS_FX100_CFG_RXBITSEL_X(x) (((x) & GENMASK(15, 12)) >> 12) +#define DEV_PCS_FX100_CFG_SIGDET_CFG(x) (((x) << 9) & GENMASK(10, 9)) +#define DEV_PCS_FX100_CFG_SIGDET_CFG_M GENMASK(10, 9) +#define DEV_PCS_FX100_CFG_SIGDET_CFG_X(x) (((x) & GENMASK(10, 9)) >> 9) +#define DEV_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8) +#define DEV_PCS_FX100_CFG_LINKHYSTTIMER(x) (((x) << 4) & GENMASK(7, 4)) +#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_M GENMASK(7, 4) +#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define DEV_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3) +#define DEV_PCS_FX100_CFG_FEFCHK_ENA BIT(2) +#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1) +#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0) + +#define DEV_PCS_FX100_STATUS 0x98 + +#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8)) +#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8) +#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8) +#define DEV_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7) +#define 
DEV_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6) +#define DEV_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5) +#define DEV_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4) +#define DEV_PCS_FX100_STATUS_FEF_STATUS BIT(2) +#define DEV_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1) +#define DEV_PCS_FX100_STATUS_SYNC_STATUS BIT(0) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h new file mode 100644 index 000000000000..6aa40ea223a2 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_DEV_GMII_H_ +#define _MSCC_OCELOT_DEV_GMII_H_ + +#define DEV_GMII_PORT_MODE_CLOCK_CFG 0x0 + +#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_TX_RST BIT(5) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_RX_RST BIT(4) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_PORT_RST BIT(3) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_PHY_RST BIT(2) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0)) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0) + +#define DEV_GMII_PORT_MODE_PORT_MISC 0x4 + +#define DEV_GMII_PORT_MODE_PORT_MISC_MPLS_RX_ENA BIT(5) +#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_ERROR_ENA BIT(4) +#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_PAUSE_ENA BIT(3) +#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_CTRL_ENA BIT(2) +#define DEV_GMII_PORT_MODE_PORT_MISC_GMII_LOOP_ENA BIT(1) +#define DEV_GMII_PORT_MODE_PORT_MISC_DEV_LOOP_ENA BIT(0) + +#define DEV_GMII_PORT_MODE_EVENTS 0x8 + +#define DEV_GMII_PORT_MODE_EEE_CFG 0xc + +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_ENA BIT(22) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15)) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8)) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1)) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1) +#define DEV_GMII_PORT_MODE_EEE_CFG_PORT_LPI BIT(0) + +#define DEV_GMII_PORT_MODE_RX_PATH_DELAY 0x10 + +#define DEV_GMII_PORT_MODE_TX_PATH_DELAY 0x14 + +#define DEV_GMII_PORT_MODE_PTP_PREDICT_CFG 0x18 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG 0x1c + +#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_TX_ENA BIT(0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG 0x20 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FDX_ENA BIT(0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_MAXLEN_CFG 0x24 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG 0x28 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_PB_ENA BIT(1) +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) +#define 
DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG 0x2c + +#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG 0x30 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG 0x34 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_OB_ENA BIT(25) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_WEXC_DIS BIT(24) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_M GENMASK(23, 16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG 0x38 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_TBI_MODE BIT(4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_LOW_CFG 0x3c + +#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_HIGH_CFG 0x40 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY 0x44 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_JUNK_STICKY BIT(5) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_JAM_STICKY BIT(3) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_ABORT_STICKY BIT(0) + +#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG 0x48 + +#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0) +#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4) +#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8) + +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG 0x4c + +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4)) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 
12)) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12) + +#define DEV_GMII_MM_STATISTICS_MM_STATUS 0x50 + +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8)) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_RX_FRAME_STATUS BIT(20) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_FRAME_STATUS BIT(24) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/drivers/net/ethernet/mscc/ocelot_hsio.h new file mode 100644 index 000000000000..d93ddec3931b --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_hsio.h @@ -0,0 +1,785 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_HSIO_H_ +#define _MSCC_OCELOT_HSIO_H_ + +#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31) +#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30) +#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29) +#define HSIO_PLL5G_CFG0_DIV4 BIT(28) +#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27) +#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23)) +#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23) +#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23) +#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18)) +#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18) +#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18) +#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16) +#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15) +#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14) +#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13) +#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12) +#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6) +#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0)) +#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0) + +#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18) +#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17) +#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16) +#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15) +#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14) +#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6)) +#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6) +#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6) +#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5) +#define HSIO_PLL5G_CFG1_PWD_TX BIT(4) +#define HSIO_PLL5G_CFG1_PWD_RX BIT(3) +#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2) +#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1) +#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0) + +#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30) +#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29) +#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28) 
+#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27) +#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26) +#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25) +#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24) +#define HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16)) +#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16) +#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15) +#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14) +#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13) +#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12) +#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11) +#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10) +#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5)) +#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5) +#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4) +#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3) +#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2) +#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1) +#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0) + +#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22)) +#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22) +#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22) +#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19)) +#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19) +#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18) +#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17) +#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16) +#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15) +#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14) +#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13) +#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12) +#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11) +#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10) +#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9) +#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8) +#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0)) +#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0) + +#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) +#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16) +#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0) + +#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) +#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16) +#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0) + +#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23) +#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20)) +#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20) +#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20) +#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19) +#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16) +#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8) +#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7) +#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6) +#define 
HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0)) +#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0) + +#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12) +#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11) +#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10) +#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9) +#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1)) +#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1) +#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1) +#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0) + +#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21)) +#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21) +#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21) +#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16)) +#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16) +#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16) +#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4)) +#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4) +#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4) +#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1) +#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0) + +#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST BIT(31) +#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20)) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16)) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16) +#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0) + +#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4) +#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2) +#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1) +#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0) + +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16)) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0) + +#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13) +#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12) +#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10)) +#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10) +#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 10) +#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8)) +#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8) +#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4) +#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0)) +#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0) + +#define HSIO_RCOMP_STATUS_BUSY BIT(12) +#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7) 
+#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0)) +#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0) + +#define HSIO_SYNC_ETH_CFG_RSZ 0x4 + +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0) + +#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0) + +#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13)) +#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13) +#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) +#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11) +#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8)) +#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8) +#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8) +#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5)) +#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5) +#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4) +#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1) +#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0) + +#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27) +#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24)) +#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24) +#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24) +#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19)) +#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19) +#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14) +#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13) +#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12) +#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11) +#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10) +#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9) +#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6)) +#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6) +#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4)) +#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4) +#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) +#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0) + +#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17)) +#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17) +#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17) +#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13)) +#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13) +#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) +#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10)) +#define 
HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10) +#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10) +#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9) +#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8) +#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4) +#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) +#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0) + +#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9) +#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8) +#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7) +#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6) +#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4)) +#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4) +#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3) +#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2) +#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1) +#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0) + +#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31) +#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21) +#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18) +#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17) +#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16) +#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13)) +#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13) +#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13) +#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12) +#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11) +#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10) +#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9) +#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8) +#define HSIO_S1G_COMMON_CFG_HRATE BIT(7) +#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0) + +#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22) +#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21) +#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8) +#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7) +#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6) +#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5) +#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3) + +#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12) +#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11) +#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10) +#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0)) +#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0) + +#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31) +#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23) +#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) +#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20) +#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20) +#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16) +#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4) +#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3) +#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2) +#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0) + +#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) 
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3) +#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2) +#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1) +#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0) + +#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3) +#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2) +#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1) +#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0) + +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0)) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0) + +#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11) +#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10) +#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9) +#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8) +#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5) +#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4) +#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3) +#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2) +#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0) + +#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7) +#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6) +#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5) +#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3) +#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2) +#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1) +#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0) + +#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0) + +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31) +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30) +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0)) +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0) + +#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16) +#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7) +#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6) +#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3)) +#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3) +#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3) +#define HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0)) +#define 
HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0) + +#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31) +#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23) +#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) +#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20) +#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20) +#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16) +#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4) +#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3) +#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2) +#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0) + +#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3) +#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2) +#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1) +#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0) + +#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3) +#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2) +#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1) +#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0) + +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16)) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0)) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0) + +#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13)) +#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13) +#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13) +#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11) +#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10) +#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9) +#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8) +#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7) +#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6) +#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5) +#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4) +#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3) +#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2) +#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0) + 
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0) + +#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8) +#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7) +#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6) +#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5) +#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3) +#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2) +#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1) +#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0) + +#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0) + +#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13)) +#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13) +#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) +#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10)) +#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10) +#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10) +#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8)) +#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8) +#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5)) +#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5) +#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4) +#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1) +#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0) + +#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29)) +#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29) +#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29) +#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28) +#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24)) +#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24) +#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24) +#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20)) +#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20) +#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) & GENMASK(23, 20)) >> 20) +#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18)) +#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18) +#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) & GENMASK(17, 15)) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15) 
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7) +#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5) +#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4) +#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3) +#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2) +#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1) +#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0) + +#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17)) +#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17) +#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17) +#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12)) +#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12) +#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12) +#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8)) +#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8) +#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8) +#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7) +#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6) +#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5) +#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4) +#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3) +#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2) +#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1) +#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0) + +#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27)) +#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27) +#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27) +#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22)) +#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22) +#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22) +#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19)) +#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19) +#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16) +#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10)) +#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10) +#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 10)) >> 10) +#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5)) +#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5) +#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3)) +#define HSIO_S6G_IB_CFG2_IB_UMAX_M GENMASK(4, 3) +#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3) 
+#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0)) +#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0) + +#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18) +#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12) +#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6) +#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0) + +#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18) +#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12) +#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6) +#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0) + +#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18) +#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12) +#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6) +#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0) + +#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31) +#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30) +#define HSIO_S6G_OB_CFG_OB_POL BIT(29) +#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23)) +#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23) +#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23) +#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18)) +#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18) +#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18) +#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17) +#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16) +#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11)) +#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11) +#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11) +#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10) +#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9) +#define HSIO_S6G_OB_CFG_OB_SR_H BIT(8) +#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4) +#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) +#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0) + +#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6)) 
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6) +#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0) + +#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8) +#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7) +#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6) +#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4)) +#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4) +#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3) +#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2) +#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1) +#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0) + +#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17) +#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16) +#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15) +#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14) +#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13) +#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12) +#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9)) +#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9) +#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9) +#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8) +#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7) +#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6) +#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5) +#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4) +#define HSIO_S6G_COMMON_CFG_HRATE BIT(3) +#define HSIO_S6G_COMMON_CFG_QRATE BIT(2) +#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0)) +#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0) + +#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16) +#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15) +#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14) +#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6)) +#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6) +#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6) +#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5) +#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4) +#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3) +#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2) +#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1) +#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0) + +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5) +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4) +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3) +#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2) +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1) +#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0) + +#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16)) +#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16) +#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0)) +#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0) + +#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8) +#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7) +#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6) +#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5) +#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4) +#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3) +#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2) +#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1) +#define HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0) + +#define 
HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18) +#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12) +#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6) +#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0) + +#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2) +#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1) +#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0) + +#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10) +#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9) +#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8) +#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0)) +#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0) + +#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26)) +#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26) +#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26) +#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & GENMASK(25, 21)) +#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21) +#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21) +#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16)) +#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16) +#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16) +#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10)) +#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10) +#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10) +#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5)) +#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5) +#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0)) +#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0) + +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31) +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30) +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0)) +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0) + +#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6) +#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5) +#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4) +#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3) +#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2) +#define HSIO_HW_CFG_PCIE_ENA BIT(1) +#define HSIO_HW_CFG_QSGMII_ENA BIT(0) + +#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3) +#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2) +#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1) +#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0) + +#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1)) +#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1) +#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1) +#define HSIO_HW_QSGMII_STAT_SYNC BIT(0) + +#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1)) +#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1) +#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1) +#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0) + +#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5) 
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1) +#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0) + +#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8) +#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0)) +#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0) + +#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8) +#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0)) +#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c new file mode 100644 index 000000000000..c6db8ad31fdf --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +#include "ocelot.h" + +u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset) +{ + u16 target = reg >> TARGET_OFFSET; + u32 val; + + WARN_ON(!target); + + regmap_read(ocelot->targets[target], + ocelot->map[target][reg & REG_MASK] + offset, &val); + return val; +} +EXPORT_SYMBOL(__ocelot_read_ix); + +void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset) +{ + u16 target = reg >> TARGET_OFFSET; + + WARN_ON(!target); + + regmap_write(ocelot->targets[target], + ocelot->map[target][reg & REG_MASK] + offset, val); +} +EXPORT_SYMBOL(__ocelot_write_ix); + +void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, + u32 offset) +{ + u16 target = reg >> TARGET_OFFSET; + + WARN_ON(!target); + + regmap_update_bits(ocelot->targets[target], + ocelot->map[target][reg & REG_MASK] + offset, + mask, val); +} +EXPORT_SYMBOL(__ocelot_rmw_ix); + +u32 ocelot_port_readl(struct ocelot_port *port, u32 reg) +{ + return readl(port->regs + reg); +} +EXPORT_SYMBOL(ocelot_port_readl); + +void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) +{ + writel(val, port->regs + reg); +} +EXPORT_SYMBOL(ocelot_port_writel); + +int ocelot_regfields_init(struct ocelot *ocelot, + const struct reg_field *const regfields) +{ + unsigned int i; + u16 target; + + for (i = 0; i < REGFIELD_MAX; i++) { + struct reg_field regfield = {}; + u32 reg = regfields[i].reg; + + if (!reg) + continue; + + target = regfields[i].reg >> TARGET_OFFSET; + + regfield.reg = ocelot->map[target][reg & REG_MASK]; + regfield.lsb = regfields[i].lsb; + regfield.msb = regfields[i].msb; + + ocelot->regfields[i] = + devm_regmap_field_alloc(ocelot->dev, + ocelot->targets[target], + regfield); + + if (IS_ERR(ocelot->regfields[i])) + return PTR_ERR(ocelot->regfields[i]); + } + + return 0; +} +EXPORT_SYMBOL(ocelot_regfields_init); + +static struct regmap_config ocelot_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +struct regmap *ocelot_io_platform_init(struct ocelot *ocelot, + struct platform_device *pdev, + const char *name) +{ + struct resource *res; + void __iomem *regs; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + regs = devm_ioremap_resource(ocelot->dev, res); + if (IS_ERR(regs)) + return ERR_CAST(regs); + + ocelot_regmap_config.name = name; + return devm_regmap_init_mmio(ocelot->dev, regs, + &ocelot_regmap_config); }
+EXPORT_SYMBOL(ocelot_io_platform_init); diff --git a/drivers/net/ethernet/mscc/ocelot_qs.h b/drivers/net/ethernet/mscc/ocelot_qs.h new file mode 100644 index 000000000000..d18ae726c01d --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_qs.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_QS_H_ +#define _MSCC_OCELOT_QS_H_ + +/* TODO handle BE */ +#define XTR_EOF_0 0x00000080U +#define XTR_EOF_1 0x01000080U +#define XTR_EOF_2 0x02000080U +#define XTR_EOF_3 0x03000080U +#define XTR_PRUNED 0x04000080U +#define XTR_ABORT 0x05000080U +#define XTR_ESCAPE 0x06000080U +#define XTR_NOT_READY 0x07000080U +#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3)) + +#define QS_XTR_GRP_CFG_RSZ 0x4 + +#define QS_XTR_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2)) +#define QS_XTR_GRP_CFG_MODE_M GENMASK(3, 2) +#define QS_XTR_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1) +#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0) + +#define QS_XTR_RD_RSZ 0x4 + +#define QS_XTR_FRM_PRUNING_RSZ 0x4 + +#define QS_XTR_CFG_DP_WM(x) (((x) << 5) & GENMASK(7, 5)) +#define QS_XTR_CFG_DP_WM_M GENMASK(7, 5) +#define QS_XTR_CFG_DP_WM_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define QS_XTR_CFG_SCH_WM(x) (((x) << 2) & GENMASK(4, 2)) +#define QS_XTR_CFG_SCH_WM_M GENMASK(4, 2) +#define QS_XTR_CFG_SCH_WM_X(x) (((x) & GENMASK(4, 2)) >> 2) +#define QS_XTR_CFG_OFLW_ERR_STICKY(x) ((x) & GENMASK(1, 0)) +#define QS_XTR_CFG_OFLW_ERR_STICKY_M GENMASK(1, 0) + +#define QS_INJ_GRP_CFG_RSZ 0x4 + +#define QS_INJ_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2)) +#define QS_INJ_GRP_CFG_MODE_M GENMASK(3, 2) +#define QS_INJ_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0) + +#define QS_INJ_WR_RSZ 0x4 + +#define QS_INJ_CTRL_RSZ 0x4 + +#define QS_INJ_CTRL_GAP_SIZE(x) (((x) << 21) & GENMASK(24, 21)) +#define QS_INJ_CTRL_GAP_SIZE_M GENMASK(24, 21) +#define QS_INJ_CTRL_GAP_SIZE_X(x) (((x) & GENMASK(24, 21)) >> 21) +#define QS_INJ_CTRL_ABORT BIT(20) +#define QS_INJ_CTRL_EOF BIT(19) +#define QS_INJ_CTRL_SOF BIT(18) +#define QS_INJ_CTRL_VLD_BYTES(x) (((x) << 16) & GENMASK(17, 16)) +#define QS_INJ_CTRL_VLD_BYTES_M GENMASK(17, 16) +#define QS_INJ_CTRL_VLD_BYTES_X(x) (((x) & GENMASK(17, 16)) >> 16) + +#define QS_INJ_STATUS_WMARK_REACHED(x) (((x) << 4) & GENMASK(5, 4)) +#define QS_INJ_STATUS_WMARK_REACHED_M GENMASK(5, 4) +#define QS_INJ_STATUS_WMARK_REACHED_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define QS_INJ_STATUS_FIFO_RDY(x) (((x) << 2) & GENMASK(3, 2)) +#define QS_INJ_STATUS_FIFO_RDY_M GENMASK(3, 2) +#define QS_INJ_STATUS_FIFO_RDY_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QS_INJ_STATUS_INJ_IN_PROGRESS(x) ((x) & GENMASK(1, 0)) +#define QS_INJ_STATUS_INJ_IN_PROGRESS_M GENMASK(1, 0) + +#define QS_INJ_ERR_RSZ 0x4 + +#define QS_INJ_ERR_ABORT_ERR_STICKY BIT(1) +#define QS_INJ_ERR_WR_ERR_STICKY BIT(0) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_qsys.h b/drivers/net/ethernet/mscc/ocelot_qsys.h new file mode 100644 index 000000000000..aa7267d5ca77 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_qsys.h @@ -0,0 +1,270 @@ +/* + * Microsemi Ocelot Switch driver + * + * License: Dual MIT/GPL + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_QSYS_H_ +#define _MSCC_OCELOT_QSYS_H_ + +#define QSYS_PORT_MODE_RSZ 0x4 + +#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1) +#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0) + +#define QSYS_SWITCH_PORT_MODE_RSZ 0x4 + 
+#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14) +#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11)) +#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11) +#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11) +#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10) +#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9) +#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1)) +#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1) +#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1) +#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0) + +#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5) +#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4) +#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3) +#define QSYS_STAT_CNT_CFG_DROP_YELLOW_CNT_MODE BIT(2) +#define QSYS_STAT_CNT_CFG_DROP_COUNT_ONCE BIT(1) +#define QSYS_STAT_CNT_CFG_DROP_COUNT_EGRESS BIT(0) + +#define QSYS_EEE_CFG_RSZ 0x4 + +#define QSYS_EEE_THRES_EEE_HIGH_BYTES(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_EEE_THRES_EEE_HIGH_BYTES_M GENMASK(15, 8) +#define QSYS_EEE_THRES_EEE_HIGH_BYTES_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define QSYS_EEE_THRES_EEE_HIGH_FRAMES(x) ((x) & GENMASK(7, 0)) +#define QSYS_EEE_THRES_EEE_HIGH_FRAMES_M GENMASK(7, 0) + +#define QSYS_SW_STATUS_RSZ 0x4 + +#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT(x) (((x) << 8) & GENMASK(12, 8)) +#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_M GENMASK(12, 8) +#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_X(x) (((x) & GENMASK(12, 8)) >> 8) +#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK(x) ((x) & GENMASK(7, 0)) +#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M GENMASK(7, 0) + +#define QSYS_QMAP_GSZ 0x4 + +#define QSYS_QMAP_SE_BASE(x) (((x) << 5) & GENMASK(12, 5)) +#define QSYS_QMAP_SE_BASE_M GENMASK(12, 5) +#define QSYS_QMAP_SE_BASE_X(x) (((x) & GENMASK(12, 5)) >> 5) +#define QSYS_QMAP_SE_IDX_SEL(x) (((x) << 2) & GENMASK(4, 2)) +#define QSYS_QMAP_SE_IDX_SEL_M GENMASK(4, 2) +#define QSYS_QMAP_SE_IDX_SEL_X(x) (((x) & GENMASK(4, 2)) >> 2) +#define QSYS_QMAP_SE_INP_SEL(x) ((x) & GENMASK(1, 0)) +#define QSYS_QMAP_SE_INP_SEL_M GENMASK(1, 0) + +#define QSYS_ISDX_SGRP_GSZ 0x4 + +#define QSYS_TIMED_FRAME_ENTRY_GSZ 0x4 + +#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT(x) (((x) << 9) & GENMASK(18, 9)) +#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_M GENMASK(18, 9) +#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_X(x) (((x) & GENMASK(18, 9)) >> 9) +#define QSYS_TFRM_MISC_TIMED_CANCEL_1SHOT BIT(8) +#define QSYS_TFRM_MISC_TIMED_SLOT_MODE_MC BIT(7) +#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT(x) ((x) & GENMASK(6, 0)) +#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT_M GENMASK(6, 0) + +#define QSYS_RED_PROFILE_RSZ 0x4 + +#define QSYS_RED_PROFILE_WM_RED_LOW(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_RED_PROFILE_WM_RED_LOW_M GENMASK(15, 8) +#define QSYS_RED_PROFILE_WM_RED_LOW_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define QSYS_RED_PROFILE_WM_RED_HIGH(x) ((x) & GENMASK(7, 0)) +#define QSYS_RED_PROFILE_WM_RED_HIGH_M GENMASK(7, 0) + +#define QSYS_RES_CFG_GSZ 0x8 + +#define QSYS_RES_STAT_GSZ 0x8 + +#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12)) +#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12) +#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12) +#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0)) +#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0) + +#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2)) +#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2) +#define QSYS_EVENTS_CORE_EV_FDC_X(x) (((x) & GENMASK(4, 2)) >> 2) +#define 
QSYS_EVENTS_CORE_EV_FRD(x) ((x) & GENMASK(1, 0)) +#define QSYS_EVENTS_CORE_EV_FRD_M GENMASK(1, 0) + +#define QSYS_QMAXSDU_CFG_0_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_1_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_2_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_3_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_4_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_5_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_6_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_7_RSZ 0x4 + +#define QSYS_PREEMPTION_CFG_RSZ 0x4 + +#define QSYS_PREEMPTION_CFG_P_QUEUES(x) ((x) & GENMASK(7, 0)) +#define QSYS_PREEMPTION_CFG_P_QUEUES_M GENMASK(7, 0) +#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(x) (((x) << 8) & GENMASK(9, 8)) +#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M GENMASK(9, 8) +#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define QSYS_PREEMPTION_CFG_STRICT_IPG(x) (((x) << 12) & GENMASK(13, 12)) +#define QSYS_PREEMPTION_CFG_STRICT_IPG_M GENMASK(13, 12) +#define QSYS_PREEMPTION_CFG_STRICT_IPG_X(x) (((x) & GENMASK(13, 12)) >> 12) +#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE(x) (((x) << 16) & GENMASK(31, 16)) +#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_M GENMASK(31, 16) +#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(x) (((x) & GENMASK(31, 16)) >> 16) + +#define QSYS_CIR_CFG_GSZ 0x80 + +#define QSYS_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6)) +#define QSYS_CIR_CFG_CIR_RATE_M GENMASK(20, 6) +#define QSYS_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6) +#define QSYS_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0)) +#define QSYS_CIR_CFG_CIR_BURST_M GENMASK(5, 0) + +#define QSYS_EIR_CFG_GSZ 0x80 + +#define QSYS_EIR_CFG_EIR_RATE(x) (((x) << 7) & GENMASK(21, 7)) +#define QSYS_EIR_CFG_EIR_RATE_M GENMASK(21, 7) +#define QSYS_EIR_CFG_EIR_RATE_X(x) (((x) & GENMASK(21, 7)) >> 7) +#define QSYS_EIR_CFG_EIR_BURST(x) (((x) << 1) & GENMASK(6, 1)) +#define QSYS_EIR_CFG_EIR_BURST_M GENMASK(6, 1) +#define QSYS_EIR_CFG_EIR_BURST_X(x) (((x) & GENMASK(6, 1)) >> 1) +#define QSYS_EIR_CFG_EIR_MARK_ENA BIT(0) + +#define QSYS_SE_CFG_GSZ 0x80 + +#define QSYS_SE_CFG_SE_DWRR_CNT(x) (((x) << 6) & GENMASK(9, 6)) +#define QSYS_SE_CFG_SE_DWRR_CNT_M GENMASK(9, 6) +#define QSYS_SE_CFG_SE_DWRR_CNT_X(x) (((x) & GENMASK(9, 6)) >> 6) +#define QSYS_SE_CFG_SE_RR_ENA BIT(5) +#define QSYS_SE_CFG_SE_AVB_ENA BIT(4) +#define QSYS_SE_CFG_SE_FRM_MODE(x) (((x) << 2) & GENMASK(3, 2)) +#define QSYS_SE_CFG_SE_FRM_MODE_M GENMASK(3, 2) +#define QSYS_SE_CFG_SE_FRM_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QSYS_SE_CFG_SE_EXC_ENA BIT(1) +#define QSYS_SE_CFG_SE_EXC_FWD BIT(0) + +#define QSYS_SE_DWRR_CFG_GSZ 0x80 +#define QSYS_SE_DWRR_CFG_RSZ 0x4 + +#define QSYS_SE_CONNECT_GSZ 0x80 + +#define QSYS_SE_CONNECT_SE_OUTP_IDX(x) (((x) << 17) & GENMASK(24, 17)) +#define QSYS_SE_CONNECT_SE_OUTP_IDX_M GENMASK(24, 17) +#define QSYS_SE_CONNECT_SE_OUTP_IDX_X(x) (((x) & GENMASK(24, 17)) >> 17) +#define QSYS_SE_CONNECT_SE_INP_IDX(x) (((x) << 9) & GENMASK(16, 9)) +#define QSYS_SE_CONNECT_SE_INP_IDX_M GENMASK(16, 9) +#define QSYS_SE_CONNECT_SE_INP_IDX_X(x) (((x) & GENMASK(16, 9)) >> 9) +#define QSYS_SE_CONNECT_SE_OUTP_CON(x) (((x) << 5) & GENMASK(8, 5)) +#define QSYS_SE_CONNECT_SE_OUTP_CON_M GENMASK(8, 5) +#define QSYS_SE_CONNECT_SE_OUTP_CON_X(x) (((x) & GENMASK(8, 5)) >> 5) +#define QSYS_SE_CONNECT_SE_INP_CNT(x) (((x) << 1) & GENMASK(4, 1)) +#define QSYS_SE_CONNECT_SE_INP_CNT_M GENMASK(4, 1) +#define QSYS_SE_CONNECT_SE_INP_CNT_X(x) (((x) & GENMASK(4, 1)) >> 1) +#define QSYS_SE_CONNECT_SE_TERMINAL BIT(0) + +#define QSYS_SE_DLB_SENSE_GSZ 0x80 + +#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO(x) (((x) << 11) & GENMASK(13, 11)) 
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_M GENMASK(13, 11) +#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_X(x) (((x) & GENMASK(13, 11)) >> 11) +#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT(x) (((x) << 7) & GENMASK(10, 7)) +#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_M GENMASK(10, 7) +#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_X(x) (((x) & GENMASK(10, 7)) >> 7) +#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT(x) (((x) << 3) & GENMASK(6, 3)) +#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_M GENMASK(6, 3) +#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_X(x) (((x) & GENMASK(6, 3)) >> 3) +#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(2) +#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_ENA BIT(1) +#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0) + +#define QSYS_CIR_STATE_GSZ 0x80 + +#define QSYS_CIR_STATE_CIR_LVL(x) (((x) << 4) & GENMASK(25, 4)) +#define QSYS_CIR_STATE_CIR_LVL_M GENMASK(25, 4) +#define QSYS_CIR_STATE_CIR_LVL_X(x) (((x) & GENMASK(25, 4)) >> 4) +#define QSYS_CIR_STATE_SHP_TIME(x) ((x) & GENMASK(3, 0)) +#define QSYS_CIR_STATE_SHP_TIME_M GENMASK(3, 0) + +#define QSYS_EIR_STATE_GSZ 0x80 + +#define QSYS_SE_STATE_GSZ 0x80 + +#define QSYS_SE_STATE_SE_OUTP_LVL(x) (((x) << 1) & GENMASK(2, 1)) +#define QSYS_SE_STATE_SE_OUTP_LVL_M GENMASK(2, 1) +#define QSYS_SE_STATE_SE_OUTP_LVL_X(x) (((x) & GENMASK(2, 1)) >> 1) +#define QSYS_SE_STATE_SE_WAS_YEL BIT(0) + +#define QSYS_HSCH_MISC_CFG_SE_CONNECT_VLD BIT(8) +#define QSYS_HSCH_MISC_CFG_FRM_ADJ(x) (((x) << 3) & GENMASK(7, 3)) +#define QSYS_HSCH_MISC_CFG_FRM_ADJ_M GENMASK(7, 3) +#define QSYS_HSCH_MISC_CFG_FRM_ADJ_X(x) (((x) & GENMASK(7, 3)) >> 3) +#define QSYS_HSCH_MISC_CFG_LEAK_DIS BIT(2) +#define QSYS_HSCH_MISC_CFG_QSHP_EXC_ENA BIT(1) +#define QSYS_HSCH_MISC_CFG_PFC_BYP_UPD BIT(0) + +#define QSYS_TAG_CONFIG_RSZ 0x4 + +#define QSYS_TAG_CONFIG_ENABLE BIT(0) +#define QSYS_TAG_CONFIG_LINK_SPEED(x) (((x) << 4) & GENMASK(5, 4)) +#define QSYS_TAG_CONFIG_LINK_SPEED_M GENMASK(5, 4) +#define QSYS_TAG_CONFIG_LINK_SPEED_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define QSYS_TAG_CONFIG_INIT_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_TAG_CONFIG_INIT_GATE_STATE_M GENMASK(15, 8) +#define QSYS_TAG_CONFIG_INIT_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(x) (((x) << 16) & GENMASK(23, 16)) +#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M GENMASK(23, 16) +#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_X(x) (((x) & GENMASK(23, 16)) >> 16) + +#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(x) ((x) & GENMASK(7, 0)) +#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M GENMASK(7, 0) +#define QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q BIT(8) +#define QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE BIT(16) + +#define QSYS_PORT_MAX_SDU_RSZ 0x4 + +#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0) +#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16)) +#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_M GENMASK(31, 16) +#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16) + +#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0)) +#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0) +#define QSYS_GCL_CFG_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_GCL_CFG_REG_1_GATE_STATE_M GENMASK(15, 8) +#define QSYS_GCL_CFG_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8) + +#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0) +#define 
QSYS_PARAM_STATUS_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16)) +#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_M GENMASK(31, 16) +#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16) + +#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0) +#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE(x) (((x) << 16) & GENMASK(23, 16)) +#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_M GENMASK(23, 16) +#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING BIT(24) + +#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0)) +#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0) +#define QSYS_GCL_STATUS_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_GCL_STATUS_REG_1_GATE_STATE_M GENMASK(15, 8) +#define QSYS_GCL_STATUS_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c new file mode 100644 index 000000000000..e334b406c40c --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_regs.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include "ocelot.h" + +static const u32 ocelot_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x009000), + REG(ANA_VLANMASK, 0x009004), + REG(ANA_PORT_B_DOMAIN, 0x009008), + REG(ANA_ANAGEFIL, 0x00900c), + REG(ANA_ANEVENTS, 0x009010), + REG(ANA_STORMLIMIT_BURST, 0x009014), + REG(ANA_STORMLIMIT_CFG, 0x009018), + REG(ANA_ISOLATED_PORTS, 0x009028), + REG(ANA_COMMUNITY_PORTS, 0x00902c), + REG(ANA_AUTOAGE, 0x009030), + REG(ANA_MACTOPTIONS, 0x009034), + REG(ANA_LEARNDISC, 0x009038), + REG(ANA_AGENCTRL, 0x00903c), + REG(ANA_MIRRORPORTS, 0x009040), + REG(ANA_EMIRRORPORTS, 0x009044), + REG(ANA_FLOODING, 0x009048), + REG(ANA_FLOODING_IPMC, 0x00904c), + REG(ANA_SFLOW_CFG, 0x009050), + REG(ANA_PORT_MODE, 0x009080), + REG(ANA_PGID_PGID, 0x008c00), + REG(ANA_TABLES_ANMOVED, 0x008b30), + REG(ANA_TABLES_MACHDATA, 0x008b34), + REG(ANA_TABLES_MACLDATA, 0x008b38), + REG(ANA_TABLES_MACACCESS, 0x008b3c), + REG(ANA_TABLES_MACTINDX, 0x008b40), + REG(ANA_TABLES_VLANACCESS, 0x008b44), + REG(ANA_TABLES_VLANTIDX, 0x008b48), + REG(ANA_TABLES_ISDXACCESS, 0x008b4c), + REG(ANA_TABLES_ISDXTIDX, 0x008b50), + REG(ANA_TABLES_ENTRYLIM, 0x008b00), + REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54), + REG(ANA_TABLES_PTP_ID_LOW, 0x008b58), + REG(ANA_MSTI_STATE, 0x008e00), + REG(ANA_PORT_VLAN_CFG, 0x007000), + REG(ANA_PORT_DROP_CFG, 0x007004), + REG(ANA_PORT_QOS_CFG, 0x007008), + REG(ANA_PORT_VCAP_CFG, 0x00700c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010), + REG(ANA_PORT_VCAP_S2_CFG, 0x00701c), + REG(ANA_PORT_PCP_DEI_MAP, 0x007020), + REG(ANA_PORT_CPU_FWD_CFG, 0x007060), + REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064), + REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068), + REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c), + REG(ANA_PORT_PORT_CFG, 0x007070), + REG(ANA_PORT_POL_CFG, 0x007074), + REG(ANA_PORT_PTP_CFG, 0x007078), + REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c), + REG(ANA_OAM_UPM_LM_CNT, 0x007c00), + REG(ANA_PORT_PTP_DLY2_CFG, 0x007080), + REG(ANA_PFC_PFC_CFG, 0x008800), + REG(ANA_PFC_PFC_TIMER, 0x008804), + REG(ANA_IPT_OAM_MEP_CFG, 0x008000), + REG(ANA_IPT_IPT, 0x008004), + REG(ANA_PPT_PPT, 0x008ac0), + REG(ANA_FID_MAP_FID_MAP, 0x000000), + REG(ANA_AGGR_CFG, 0x0090b4), + REG(ANA_CPUQ_CFG, 0x0090b8), + 
REG(ANA_CPUQ_CFG2, 0x0090bc), + REG(ANA_CPUQ_8021_CFG, 0x0090c0), + REG(ANA_DSCP_CFG, 0x009100), + REG(ANA_DSCP_REWR_CFG, 0x009200), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240), + REG(ANA_VCAP_RNG_VAL_CFG, 0x009260), + REG(ANA_VRAP_CFG, 0x009280), + REG(ANA_VRAP_HDR_DATA, 0x009284), + REG(ANA_VRAP_HDR_MASK, 0x009288), + REG(ANA_DISCARD_CFG, 0x00928c), + REG(ANA_FID_CFG, 0x009290), + REG(ANA_POL_PIR_CFG, 0x004000), + REG(ANA_POL_CIR_CFG, 0x004004), + REG(ANA_POL_MODE_CFG, 0x004008), + REG(ANA_POL_PIR_STATE, 0x00400c), + REG(ANA_POL_CIR_STATE, 0x004010), + REG(ANA_POL_STATE, 0x004014), + REG(ANA_POL_FLOWC, 0x008b80), + REG(ANA_POL_HYST, 0x008bec), + REG(ANA_POL_MISC_CFG, 0x008bf0), +}; + +static const u32 ocelot_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG(QS_INH_DBG, 0x000048), +}; + +static const u32 ocelot_hsio_regmap[] = { + REG(HSIO_PLL5G_CFG0, 0x000000), + REG(HSIO_PLL5G_CFG1, 0x000004), + REG(HSIO_PLL5G_CFG2, 0x000008), + REG(HSIO_PLL5G_CFG3, 0x00000c), + REG(HSIO_PLL5G_CFG4, 0x000010), + REG(HSIO_PLL5G_CFG5, 0x000014), + REG(HSIO_PLL5G_CFG6, 0x000018), + REG(HSIO_PLL5G_STATUS0, 0x00001c), + REG(HSIO_PLL5G_STATUS1, 0x000020), + REG(HSIO_PLL5G_BIST_CFG0, 0x000024), + REG(HSIO_PLL5G_BIST_CFG1, 0x000028), + REG(HSIO_PLL5G_BIST_CFG2, 0x00002c), + REG(HSIO_PLL5G_BIST_STAT0, 0x000030), + REG(HSIO_PLL5G_BIST_STAT1, 0x000034), + REG(HSIO_RCOMP_CFG0, 0x000038), + REG(HSIO_RCOMP_STATUS, 0x00003c), + REG(HSIO_SYNC_ETH_CFG, 0x000040), + REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048), + REG(HSIO_S1G_DES_CFG, 0x00004c), + REG(HSIO_S1G_IB_CFG, 0x000050), + REG(HSIO_S1G_OB_CFG, 0x000054), + REG(HSIO_S1G_SER_CFG, 0x000058), + REG(HSIO_S1G_COMMON_CFG, 0x00005c), + REG(HSIO_S1G_PLL_CFG, 0x000060), + REG(HSIO_S1G_PLL_STATUS, 0x000064), + REG(HSIO_S1G_DFT_CFG0, 0x000068), + REG(HSIO_S1G_DFT_CFG1, 0x00006c), + REG(HSIO_S1G_DFT_CFG2, 0x000070), + REG(HSIO_S1G_TP_CFG, 0x000074), + REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078), + REG(HSIO_S1G_MISC_CFG, 0x00007c), + REG(HSIO_S1G_DFT_STATUS, 0x000080), + REG(HSIO_S1G_MISC_STATUS, 0x000084), + REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088), + REG(HSIO_S6G_DIG_CFG, 0x00008c), + REG(HSIO_S6G_DFT_CFG0, 0x000090), + REG(HSIO_S6G_DFT_CFG1, 0x000094), + REG(HSIO_S6G_DFT_CFG2, 0x000098), + REG(HSIO_S6G_TP_CFG0, 0x00009c), + REG(HSIO_S6G_TP_CFG1, 0x0000a0), + REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4), + REG(HSIO_S6G_MISC_CFG, 0x0000a8), + REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac), + REG(HSIO_S6G_DFT_STATUS, 0x0000b0), + REG(HSIO_S6G_ERR_CNT, 0x0000b4), + REG(HSIO_S6G_MISC_STATUS, 0x0000b8), + REG(HSIO_S6G_DES_CFG, 0x0000bc), + REG(HSIO_S6G_IB_CFG, 0x0000c0), + REG(HSIO_S6G_IB_CFG1, 0x0000c4), + REG(HSIO_S6G_IB_CFG2, 0x0000c8), + REG(HSIO_S6G_IB_CFG3, 0x0000cc), + REG(HSIO_S6G_IB_CFG4, 0x0000d0), + REG(HSIO_S6G_IB_CFG5, 0x0000d4), + REG(HSIO_S6G_OB_CFG, 0x0000d8), + REG(HSIO_S6G_OB_CFG1, 0x0000dc), + REG(HSIO_S6G_SER_CFG, 0x0000e0), + REG(HSIO_S6G_COMMON_CFG, 0x0000e4), + REG(HSIO_S6G_PLL_CFG, 0x0000e8), + REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec), + REG(HSIO_S6G_GP_CFG, 0x0000f0), + REG(HSIO_S6G_IB_STATUS0, 0x0000f4), + REG(HSIO_S6G_IB_STATUS1, 0x0000f8), + REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc), + REG(HSIO_S6G_PLL_STATUS, 0x000100), + REG(HSIO_S6G_REVID, 0x000104), + REG(HSIO_MCB_S6G_ADDR_CFG, 
0x000108), + REG(HSIO_HW_CFG, 0x00010c), + REG(HSIO_HW_QSGMII_CFG, 0x000110), + REG(HSIO_HW_QSGMII_STAT, 0x000114), + REG(HSIO_CLK_CFG, 0x000118), + REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c), + REG(HSIO_TEMP_SENSOR_CFG, 0x000120), + REG(HSIO_TEMP_SENSOR_STAT, 0x000124), +}; + +static const u32 ocelot_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x011200), + REG(QSYS_SWITCH_PORT_MODE, 0x011234), + REG(QSYS_STAT_CNT_CFG, 0x011264), + REG(QSYS_EEE_CFG, 0x011268), + REG(QSYS_EEE_THRES, 0x011294), + REG(QSYS_IGR_NO_SHARING, 0x011298), + REG(QSYS_EGR_NO_SHARING, 0x01129c), + REG(QSYS_SW_STATUS, 0x0112a0), + REG(QSYS_EXT_CPU_CFG, 0x0112d0), + REG(QSYS_PAD_CFG, 0x0112d4), + REG(QSYS_CPU_GROUP_MAP, 0x0112d8), + REG(QSYS_QMAP, 0x0112dc), + REG(QSYS_ISDX_SGRP, 0x011400), + REG(QSYS_TIMED_FRAME_ENTRY, 0x014000), + REG(QSYS_TFRM_MISC, 0x011310), + REG(QSYS_TFRM_PORT_DLY, 0x011314), + REG(QSYS_TFRM_TIMER_CFG_1, 0x011318), + REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c), + REG(QSYS_TFRM_TIMER_CFG_3, 0x011320), + REG(QSYS_TFRM_TIMER_CFG_4, 0x011324), + REG(QSYS_TFRM_TIMER_CFG_5, 0x011328), + REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c), + REG(QSYS_TFRM_TIMER_CFG_7, 0x011330), + REG(QSYS_TFRM_TIMER_CFG_8, 0x011334), + REG(QSYS_RED_PROFILE, 0x011338), + REG(QSYS_RES_QOS_MODE, 0x011378), + REG(QSYS_RES_CFG, 0x012000), + REG(QSYS_RES_STAT, 0x012004), + REG(QSYS_EGR_DROP_MODE, 0x01137c), + REG(QSYS_EQ_CTRL, 0x011380), + REG(QSYS_EVENTS_CORE, 0x011384), + REG(QSYS_CIR_CFG, 0x000000), + REG(QSYS_EIR_CFG, 0x000004), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + REG(QSYS_SE_CONNECT, 0x00003c), + REG(QSYS_SE_DLB_SENSE, 0x000040), + REG(QSYS_CIR_STATE, 0x000044), + REG(QSYS_EIR_STATE, 0x000048), + REG(QSYS_SE_STATE, 0x00004c), + REG(QSYS_HSCH_MISC_CFG, 0x011388), +}; + +static const u32 ocelot_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG(REW_PTP_CFG, 0x000050), + REG(REW_PTP_DLY1_CFG, 0x000054), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000690), + REG(REW_DSCP_REMAP_CFG, 0x000790), + REG(REW_STAT_CFG, 0x000890), + REG(REW_PPT, 0x000680), +}; + +static const u32 ocelot_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_1023, 0x000030), + REG(SYS_COUNT_RX_1024_1526, 0x000034), + REG(SYS_COUNT_RX_1527_MAX, 0x000038), + REG(SYS_COUNT_RX_PAUSE, 0x00003c), + REG(SYS_COUNT_RX_CONTROL, 0x000040), + REG(SYS_COUNT_RX_LONGS, 0x000044), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048), + REG(SYS_COUNT_TX_OCTETS, 0x000100), + REG(SYS_COUNT_TX_UNICAST, 0x000104), + REG(SYS_COUNT_TX_MULTICAST, 0x000108), + REG(SYS_COUNT_TX_BROADCAST, 0x00010c), + REG(SYS_COUNT_TX_COLLISION, 0x000110), + REG(SYS_COUNT_TX_DROPS, 0x000114), + REG(SYS_COUNT_TX_PAUSE, 0x000118), + REG(SYS_COUNT_TX_64, 0x00011c), + REG(SYS_COUNT_TX_65_127, 0x000120), + REG(SYS_COUNT_TX_128_511, 0x000124), + REG(SYS_COUNT_TX_512_1023, 0x000128), + REG(SYS_COUNT_TX_1024_1526, 0x00012c), + REG(SYS_COUNT_TX_1527_MAX, 0x000130), + REG(SYS_COUNT_TX_AGING, 0x000170), + REG(SYS_RESET_CFG, 
0x000508), + REG(SYS_CMID, 0x00050c), + REG(SYS_VLAN_ETYPE_CFG, 0x000510), + REG(SYS_PORT_MODE, 0x000514), + REG(SYS_FRONT_PORT_MODE, 0x000548), + REG(SYS_FRM_AGING, 0x000574), + REG(SYS_STAT_CFG, 0x000578), + REG(SYS_SW_STATUS, 0x00057c), + REG(SYS_MISC_CFG, 0x0005ac), + REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0), + REG(SYS_REW_MAC_LOW_CFG, 0x0005dc), + REG(SYS_CM_ADDR, 0x000500), + REG(SYS_CM_DATA, 0x000504), + REG(SYS_PAUSE_CFG, 0x000608), + REG(SYS_PAUSE_TOT_CFG, 0x000638), + REG(SYS_ATOP, 0x00063c), + REG(SYS_ATOP_TOT_CFG, 0x00066c), + REG(SYS_MAC_FC_CFG, 0x000670), + REG(SYS_MMGT, 0x00069c), + REG(SYS_MMGT_FAST, 0x0006a0), + REG(SYS_EVENTS_DIF, 0x0006a4), + REG(SYS_EVENTS_CORE, 0x0006b4), + REG(SYS_CNT, 0x000000), + REG(SYS_PTP_STATUS, 0x0006b8), + REG(SYS_PTP_TXSTAMP, 0x0006bc), + REG(SYS_PTP_NXT, 0x0006c0), + REG(SYS_PTP_CFG, 0x0006c4), +}; + +static const u32 *ocelot_regmap[] = { + [ANA] = ocelot_ana_regmap, + [QS] = ocelot_qs_regmap, + [HSIO] = ocelot_hsio_regmap, + [QSYS] = ocelot_qsys_regmap, + [REW] = ocelot_rew_regmap, + [SYS] = ocelot_sys_regmap, +}; + +static const struct reg_field ocelot_regfields[] = { + [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11), + [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10), + [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27), + [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26), + [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25), + [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24), + [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23), + [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22), + [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21), + [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20), + [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19), + [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18), + [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17), + [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16), + [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15), + [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14), + [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13), + [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12), + [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11), + [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10), + [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9), + [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8), + [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7), + [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6), + [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5), + [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4), + [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3), + [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2), + [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1), + [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0), + [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18), + [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11), + [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9), + [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20), + [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19), + [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7), + [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = 
REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3), + [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0), + [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2), + [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1), + [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0), +}; + +static const struct ocelot_stat_layout ocelot_stats_layout[] = { + { .name = "rx_octets", .offset = 0x00, }, + { .name = "rx_unicast", .offset = 0x01, }, + { .name = "rx_multicast", .offset = 0x02, }, + { .name = "rx_broadcast", .offset = 0x03, }, + { .name = "rx_shorts", .offset = 0x04, }, + { .name = "rx_fragments", .offset = 0x05, }, + { .name = "rx_jabbers", .offset = 0x06, }, + { .name = "rx_crc_align_errs", .offset = 0x07, }, + { .name = "rx_sym_errs", .offset = 0x08, }, + { .name = "rx_frames_below_65_octets", .offset = 0x09, }, + { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, }, + { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, }, + { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, }, + { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, }, + { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, }, + { .name = "rx_frames_over_1526_octets", .offset = 0x0F, }, + { .name = "rx_pause", .offset = 0x10, }, + { .name = "rx_control", .offset = 0x11, }, + { .name = "rx_longs", .offset = 0x12, }, + { .name = "rx_classified_drops", .offset = 0x13, }, + { .name = "rx_red_prio_0", .offset = 0x14, }, + { .name = "rx_red_prio_1", .offset = 0x15, }, + { .name = "rx_red_prio_2", .offset = 0x16, }, + { .name = "rx_red_prio_3", .offset = 0x17, }, + { .name = "rx_red_prio_4", .offset = 0x18, }, + { .name = "rx_red_prio_5", .offset = 0x19, }, + { .name = "rx_red_prio_6", .offset = 0x1A, }, + { .name = "rx_red_prio_7", .offset = 0x1B, }, + { .name = "rx_yellow_prio_0", .offset = 0x1C, }, + { .name = "rx_yellow_prio_1", .offset = 0x1D, }, + { .name = "rx_yellow_prio_2", .offset = 0x1E, }, + { .name = "rx_yellow_prio_3", .offset = 0x1F, }, + { .name = "rx_yellow_prio_4", .offset = 0x20, }, + { .name = "rx_yellow_prio_5", .offset = 0x21, }, + { .name = "rx_yellow_prio_6", .offset = 0x22, }, + { .name = "rx_yellow_prio_7", .offset = 0x23, }, + { .name = "rx_green_prio_0", .offset = 0x24, }, + { .name = "rx_green_prio_1", .offset = 0x25, }, + { .name = "rx_green_prio_2", .offset = 0x26, }, + { .name = "rx_green_prio_3", .offset = 0x27, }, + { .name = "rx_green_prio_4", .offset = 0x28, }, + { .name = "rx_green_prio_5", .offset = 0x29, }, + { .name = "rx_green_prio_6", .offset = 0x2A, }, + { .name = "rx_green_prio_7", .offset = 0x2B, }, + { .name = "tx_octets", .offset = 0x40, }, + { .name = "tx_unicast", .offset = 0x41, }, + { .name = "tx_multicast", .offset = 0x42, }, + { .name = "tx_broadcast", .offset = 0x43, }, + { .name = "tx_collision", .offset = 0x44, }, + { .name = "tx_drops", .offset = 0x45, }, + { .name = "tx_pause", .offset = 0x46, }, + { .name = "tx_frames_below_65_octets", .offset = 0x47, }, + { .name = "tx_frames_65_to_127_octets", .offset = 0x48, }, + { .name = "tx_frames_128_255_octets", .offset = 0x49, }, + { .name = "tx_frames_256_511_octets", .offset = 0x4A, }, + { .name = "tx_frames_512_1023_octets", .offset = 0x4B, }, + { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, }, + { .name = "tx_frames_over_1526_octets", .offset = 0x4D, }, + { .name = "tx_yellow_prio_0", .offset = 0x4E, }, + { .name = "tx_yellow_prio_1", .offset = 0x4F, }, + { .name = "tx_yellow_prio_2", .offset = 0x50, }, + { .name = "tx_yellow_prio_3", .offset = 0x51, }, + { .name = 
"tx_yellow_prio_4", .offset = 0x52, }, + { .name = "tx_yellow_prio_5", .offset = 0x53, }, + { .name = "tx_yellow_prio_6", .offset = 0x54, }, + { .name = "tx_yellow_prio_7", .offset = 0x55, }, + { .name = "tx_green_prio_0", .offset = 0x56, }, + { .name = "tx_green_prio_1", .offset = 0x57, }, + { .name = "tx_green_prio_2", .offset = 0x58, }, + { .name = "tx_green_prio_3", .offset = 0x59, }, + { .name = "tx_green_prio_4", .offset = 0x5A, }, + { .name = "tx_green_prio_5", .offset = 0x5B, }, + { .name = "tx_green_prio_6", .offset = 0x5C, }, + { .name = "tx_green_prio_7", .offset = 0x5D, }, + { .name = "tx_aged", .offset = 0x5E, }, + { .name = "drop_local", .offset = 0x80, }, + { .name = "drop_tail", .offset = 0x81, }, + { .name = "drop_yellow_prio_0", .offset = 0x82, }, + { .name = "drop_yellow_prio_1", .offset = 0x83, }, + { .name = "drop_yellow_prio_2", .offset = 0x84, }, + { .name = "drop_yellow_prio_3", .offset = 0x85, }, + { .name = "drop_yellow_prio_4", .offset = 0x86, }, + { .name = "drop_yellow_prio_5", .offset = 0x87, }, + { .name = "drop_yellow_prio_6", .offset = 0x88, }, + { .name = "drop_yellow_prio_7", .offset = 0x89, }, + { .name = "drop_green_prio_0", .offset = 0x8A, }, + { .name = "drop_green_prio_1", .offset = 0x8B, }, + { .name = "drop_green_prio_2", .offset = 0x8C, }, + { .name = "drop_green_prio_3", .offset = 0x8D, }, + { .name = "drop_green_prio_4", .offset = 0x8E, }, + { .name = "drop_green_prio_5", .offset = 0x8F, }, + { .name = "drop_green_prio_6", .offset = 0x90, }, + { .name = "drop_green_prio_7", .offset = 0x91, }, +}; + +static void ocelot_pll5_init(struct ocelot *ocelot) +{ + /* Configure PLL5. This will need a proper CCF driver + * The values are coming from the VTSS API for Ocelot + */ + ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) | + HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4); + ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) | + HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) | + HSIO_PLL5G_CFG0_ENA_BIAS | + HSIO_PLL5G_CFG0_ENA_VCO_BUF | + HSIO_PLL5G_CFG0_ENA_CP1 | + HSIO_PLL5G_CFG0_SELCPI(2) | + HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) | + HSIO_PLL5G_CFG0_SELBGV820(4) | + HSIO_PLL5G_CFG0_DIV4 | + HSIO_PLL5G_CFG0_ENA_CLKTREE | + HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0); + ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET | + HSIO_PLL5G_CFG2_EN_RESET_OVERRUN | + HSIO_PLL5G_CFG2_GAIN_TEST(0x8) | + HSIO_PLL5G_CFG2_ENA_AMPCTRL | + HSIO_PLL5G_CFG2_PWD_AMPCTRL_N | + HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2); +} + +int ocelot_chip_init(struct ocelot *ocelot) +{ + int ret; + + ocelot->map = ocelot_regmap; + ocelot->stats_layout = ocelot_stats_layout; + ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); + ocelot->shared_queue_sz = 224 * 1024; + + ret = ocelot_regfields_init(ocelot, ocelot_regfields); + if (ret) + return ret; + + ocelot_pll5_init(ocelot); + + eth_random_addr(ocelot->base_mac); + ocelot->base_mac[5] &= 0xf0; + + return 0; +} +EXPORT_SYMBOL(ocelot_chip_init); diff --git a/drivers/net/ethernet/mscc/ocelot_rew.h b/drivers/net/ethernet/mscc/ocelot_rew.h new file mode 100644 index 000000000000..210914b7e20f --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_rew.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_REW_H_ +#define _MSCC_OCELOT_REW_H_ + +#define REW_PORT_VLAN_CFG_GSZ 0x80 + +#define REW_PORT_VLAN_CFG_PORT_TPID(x) (((x) << 16) & GENMASK(31, 16)) +#define REW_PORT_VLAN_CFG_PORT_TPID_M GENMASK(31, 16) 
+#define REW_PORT_VLAN_CFG_PORT_TPID_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define REW_PORT_VLAN_CFG_PORT_DEI BIT(15) +#define REW_PORT_VLAN_CFG_PORT_PCP(x) (((x) << 12) & GENMASK(14, 12)) +#define REW_PORT_VLAN_CFG_PORT_PCP_M GENMASK(14, 12) +#define REW_PORT_VLAN_CFG_PORT_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12) +#define REW_PORT_VLAN_CFG_PORT_VID(x) ((x) & GENMASK(11, 0)) +#define REW_PORT_VLAN_CFG_PORT_VID_M GENMASK(11, 0) + +#define REW_TAG_CFG_GSZ 0x80 + +#define REW_TAG_CFG_TAG_CFG(x) (((x) << 7) & GENMASK(8, 7)) +#define REW_TAG_CFG_TAG_CFG_M GENMASK(8, 7) +#define REW_TAG_CFG_TAG_CFG_X(x) (((x) & GENMASK(8, 7)) >> 7) +#define REW_TAG_CFG_TAG_TPID_CFG(x) (((x) << 5) & GENMASK(6, 5)) +#define REW_TAG_CFG_TAG_TPID_CFG_M GENMASK(6, 5) +#define REW_TAG_CFG_TAG_TPID_CFG_X(x) (((x) & GENMASK(6, 5)) >> 5) +#define REW_TAG_CFG_TAG_VID_CFG BIT(4) +#define REW_TAG_CFG_TAG_PCP_CFG(x) (((x) << 2) & GENMASK(3, 2)) +#define REW_TAG_CFG_TAG_PCP_CFG_M GENMASK(3, 2) +#define REW_TAG_CFG_TAG_PCP_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define REW_TAG_CFG_TAG_DEI_CFG(x) ((x) & GENMASK(1, 0)) +#define REW_TAG_CFG_TAG_DEI_CFG_M GENMASK(1, 0) + +#define REW_PORT_CFG_GSZ 0x80 + +#define REW_PORT_CFG_ES0_EN BIT(5) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG(x) (((x) << 3) & GENMASK(4, 3)) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_M GENMASK(4, 3) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_X(x) (((x) & GENMASK(4, 3)) >> 3) +#define REW_PORT_CFG_FCS_UPDATE_CPU_ENA BIT(2) +#define REW_PORT_CFG_FLUSH_ENA BIT(1) +#define REW_PORT_CFG_AGE_DIS BIT(0) + +#define REW_DSCP_CFG_GSZ 0x80 + +#define REW_PCP_DEI_QOS_MAP_CFG_GSZ 0x80 +#define REW_PCP_DEI_QOS_MAP_CFG_RSZ 0x4 + +#define REW_PCP_DEI_QOS_MAP_CFG_DEI_QOS_VAL BIT(3) +#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL(x) ((x) & GENMASK(2, 0)) +#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL_M GENMASK(2, 0) + +#define REW_PTP_CFG_GSZ 0x80 + +#define REW_PTP_CFG_PTP_BACKPLANE_MODE BIT(7) +#define REW_PTP_CFG_GP_CFG_UNUSED(x) (((x) << 3) & GENMASK(6, 3)) +#define REW_PTP_CFG_GP_CFG_UNUSED_M GENMASK(6, 3) +#define REW_PTP_CFG_GP_CFG_UNUSED_X(x) (((x) & GENMASK(6, 3)) >> 3) +#define REW_PTP_CFG_PTP_1STEP_DIS BIT(2) +#define REW_PTP_CFG_PTP_2STEP_DIS BIT(1) +#define REW_PTP_CFG_PTP_UDP_KEEP BIT(0) + +#define REW_PTP_DLY1_CFG_GSZ 0x80 + +#define REW_RED_TAG_CFG_GSZ 0x80 + +#define REW_RED_TAG_CFG_RED_TAG_CFG BIT(0) + +#define REW_DSCP_REMAP_DP1_CFG_RSZ 0x4 + +#define REW_DSCP_REMAP_CFG_RSZ 0x4 + +#define REW_REW_STICKY_ES0_TAGB_PUSH_FAILED BIT(0) + +#define REW_PPT_RSZ 0x4 + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_sys.h b/drivers/net/ethernet/mscc/ocelot_sys.h new file mode 100644 index 000000000000..16f91e172bcb --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_sys.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_SYS_H_ +#define _MSCC_OCELOT_SYS_H_ + +#define SYS_COUNT_RX_OCTETS_RSZ 0x4 + +#define SYS_COUNT_TX_OCTETS_RSZ 0x4 + +#define SYS_PORT_MODE_RSZ 0x4 + +#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5)) +#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5) +#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5) +#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3)) +#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3) +#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3) +#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1)) +#define 
SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1) +#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1) +#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0) + +#define SYS_FRONT_PORT_MODE_RSZ 0x4 + +#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0) + +#define SYS_FRM_AGING_AGE_TX_ENA BIT(20) +#define SYS_FRM_AGING_MAX_AGE(x) ((x) & GENMASK(19, 0)) +#define SYS_FRM_AGING_MAX_AGE_M GENMASK(19, 0) + +#define SYS_STAT_CFG_STAT_CLEAR_SHOT(x) (((x) << 10) & GENMASK(16, 10)) +#define SYS_STAT_CFG_STAT_CLEAR_SHOT_M GENMASK(16, 10) +#define SYS_STAT_CFG_STAT_CLEAR_SHOT_X(x) (((x) & GENMASK(16, 10)) >> 10) +#define SYS_STAT_CFG_STAT_VIEW(x) ((x) & GENMASK(9, 0)) +#define SYS_STAT_CFG_STAT_VIEW_M GENMASK(9, 0) + +#define SYS_SW_STATUS_RSZ 0x4 + +#define SYS_SW_STATUS_PORT_RX_PAUSED BIT(0) + +#define SYS_MISC_CFG_PTP_RSRV_CLR BIT(1) +#define SYS_MISC_CFG_PTP_DIS_NEG_RO BIT(0) + +#define SYS_REW_MAC_HIGH_CFG_RSZ 0x4 + +#define SYS_REW_MAC_LOW_CFG_RSZ 0x4 + +#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG(x) (((x) << 6) & GENMASK(21, 6)) +#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_M GENMASK(21, 6) +#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_X(x) (((x) & GENMASK(21, 6)) >> 6) +#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0)) +#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0) + +#define SYS_PAUSE_CFG_RSZ 0x4 + +#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10)) +#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10) +#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10) +#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1)) +#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1) +#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1) +#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0) + +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9)) +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9) +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9) +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP(x) ((x) & GENMASK(8, 0)) +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP_M GENMASK(8, 0) + +#define SYS_ATOP_RSZ 0x4 + +#define SYS_MAC_FC_CFG_RSZ 0x4 + +#define SYS_MAC_FC_CFG_FC_LINK_SPEED(x) (((x) << 26) & GENMASK(27, 26)) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_M GENMASK(27, 26) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_X(x) (((x) & GENMASK(27, 26)) >> 26) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG(x) (((x) << 20) & GENMASK(25, 20)) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_M GENMASK(25, 20) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_X(x) (((x) & GENMASK(25, 20)) >> 20) +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18) +#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17) +#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG(x) ((x) & GENMASK(15, 0)) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_M GENMASK(15, 0) + +#define SYS_MMGT_RELCNT(x) (((x) << 16) & GENMASK(31, 16)) +#define SYS_MMGT_RELCNT_M GENMASK(31, 16) +#define SYS_MMGT_RELCNT_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define SYS_MMGT_FREECNT(x) ((x) & GENMASK(15, 0)) +#define SYS_MMGT_FREECNT_M GENMASK(15, 0) + +#define SYS_MMGT_FAST_FREEVLD(x) (((x) << 4) & GENMASK(7, 4)) +#define SYS_MMGT_FAST_FREEVLD_M GENMASK(7, 4) +#define SYS_MMGT_FAST_FREEVLD_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define SYS_MMGT_FAST_RELVLD(x) ((x) & GENMASK(3, 0)) +#define SYS_MMGT_FAST_RELVLD_M GENMASK(3, 0) + +#define SYS_EVENTS_DIF_RSZ 0x4 + +#define SYS_EVENTS_DIF_EV_DRX(x) (((x) << 6) & GENMASK(8, 6)) +#define SYS_EVENTS_DIF_EV_DRX_M GENMASK(8, 6) +#define SYS_EVENTS_DIF_EV_DRX_X(x) (((x) & 
GENMASK(8, 6)) >> 6) +#define SYS_EVENTS_DIF_EV_DTX(x) ((x) & GENMASK(5, 0)) +#define SYS_EVENTS_DIF_EV_DTX_M GENMASK(5, 0) + +#define SYS_EVENTS_CORE_EV_FWR BIT(2) +#define SYS_EVENTS_CORE_EV_ANA(x) ((x) & GENMASK(1, 0)) +#define SYS_EVENTS_CORE_EV_ANA_M GENMASK(1, 0) + +#define SYS_CNT_GSZ 0x4 + +#define SYS_PTP_STATUS_PTP_TXSTAMP_OAM BIT(29) +#define SYS_PTP_STATUS_PTP_OVFL BIT(28) +#define SYS_PTP_STATUS_PTP_MESS_VLD BIT(27) +#define SYS_PTP_STATUS_PTP_MESS_ID(x) (((x) << 21) & GENMASK(26, 21)) +#define SYS_PTP_STATUS_PTP_MESS_ID_M GENMASK(26, 21) +#define SYS_PTP_STATUS_PTP_MESS_ID_X(x) (((x) & GENMASK(26, 21)) >> 21) +#define SYS_PTP_STATUS_PTP_MESS_TXPORT(x) (((x) << 16) & GENMASK(20, 16)) +#define SYS_PTP_STATUS_PTP_MESS_TXPORT_M GENMASK(20, 16) +#define SYS_PTP_STATUS_PTP_MESS_TXPORT_X(x) (((x) & GENMASK(20, 16)) >> 16) +#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID(x) ((x) & GENMASK(15, 0)) +#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID_M GENMASK(15, 0) + +#define SYS_PTP_TXSTAMP_PTP_TXSTAMP(x) ((x) & GENMASK(29, 0)) +#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_M GENMASK(29, 0) +#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC BIT(31) + +#define SYS_PTP_NXT_PTP_NXT BIT(0) + +#define SYS_PTP_CFG_PTP_STAMP_WID(x) (((x) << 2) & GENMASK(7, 2)) +#define SYS_PTP_CFG_PTP_STAMP_WID_M GENMASK(7, 2) +#define SYS_PTP_CFG_PTP_STAMP_WID_X(x) (((x) & GENMASK(7, 2)) >> 2) +#define SYS_PTP_CFG_PTP_CF_ROLL_MODE(x) ((x) & GENMASK(1, 0)) +#define SYS_PTP_CFG_PTP_CF_ROLL_MODE_M GENMASK(1, 0) + +#define SYS_RAM_INIT_RAM_INIT BIT(1) +#define SYS_RAM_INIT_RAM_CFG_HOOK BIT(0) + +#endif -- cgit v1.2.3 From 2312e050f42b0fcdc8a49bd11df1d3015859f2ab Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Tue, 15 May 2018 19:20:05 +0100 Subject: net: hns3: Fix for deadlock problem occurring when unregistering ae_algo When hnae3_unregister_ae_algo is called by PF, pci_disable_sriov is called. And then, hns3_remove is called by VF. We get deadlocked in this case. Since VF pci device is dependent on PF pci device, When PF pci device is removed, VF pci device must be removed. Also, To solve the deadlock problem, VF pci device should be removed before PF pci device is removed. This patch moves pci_enable/disable_sriov from hclge to hns3 to solve the deadlock problem. Also, we do not need to return EPROBE_DEFER in hnae3_register_ae_dev, because SRIOV is no longer enabled in the context calling hnae3_register_ae_dev. Mutex_trylock can be replaced with mutex_lock. Fixes: 424eb834a9be ("net: hns3: Unified HNS3 {VF|PF} Ethernet Driver for hip08 SoC") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 12 +--- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 74 +++++++++++++++++++++- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 42 ++---------- 3 files changed, 79 insertions(+), 49 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 02145f2de820..1686cebead96 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -196,17 +196,9 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - int ret = 0, lock_acquired; + int ret = 0; - /* we can get deadlocked if SRIOV is being enabled in context to probe - * and probe gets called again in same context. 
This can happen when - * pci_enable_sriov() is called to create VFs from PF probes context. - * Therefore, for simplicity uniformly defering further probing in all - * cases where we detect contention. - */ - lock_acquired = mutex_trylock(&hnae3_common_lock); - if (!lock_acquired) - return -EPROBE_DEFER; + mutex_lock(&hnae3_common_lock); list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 4031174181a0..af9e90f87109 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1487,6 +1487,68 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, }; +static bool hns3_is_phys_func(struct pci_dev *pdev) +{ + u32 dev_id = pdev->device; + + switch (dev_id) { + case HNAE3_DEV_ID_GE: + case HNAE3_DEV_ID_25GE: + case HNAE3_DEV_ID_25GE_RDMA: + case HNAE3_DEV_ID_25GE_RDMA_MACSEC: + case HNAE3_DEV_ID_50GE_RDMA: + case HNAE3_DEV_ID_50GE_RDMA_MACSEC: + case HNAE3_DEV_ID_100G_RDMA_MACSEC: + return true; + case HNAE3_DEV_ID_100G_VF: + case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: + return false; + default: + dev_warn(&pdev->dev, "un-recognized pci device-id %d", + dev_id); + } + + return false; +} + +static int get_num_req_vfs(struct pci_dev *pdev) +{ + /* a variable vf num will be supported later */ + return pci_sriov_get_totalvfs(pdev); +} + +static void hns3_enable_sriov(struct pci_dev *pdev) +{ + int num_req_vfs = get_num_req_vfs(pdev); + int ret; + + /* Enable SRIOV */ + if (!num_req_vfs) + return; + + dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n", + num_req_vfs); + + ret = pci_enable_sriov(pdev, num_req_vfs); + if (ret) + dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); +} + +static void hns3_disable_sriov(struct pci_dev *pdev) +{ + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "disabling driver while VFs are assigned\n"); + return; + } + + pci_disable_sriov(pdev); +} + /* hns3_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in hns3_pci_tbl @@ -1514,7 +1576,14 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ae_dev->dev_type = HNAE3_DEV_KNIC; pci_set_drvdata(pdev, ae_dev); - return hnae3_register_ae_dev(ae_dev); + ret = hnae3_register_ae_dev(ae_dev); + if (ret) + return ret; + + if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) + hns3_enable_sriov(pdev); + + return 0; } /* hns3_remove - Device removal routine @@ -1524,6 +1593,9 @@ static void hns3_remove(struct pci_dev *pdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) + hns3_disable_sriov(pdev); + hnae3_unregister_ae_dev(ae_dev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 316ec8427891..343197a57240 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1473,21 +1473,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) hdev->vport = vport; hdev->num_alloc_vport = num_vport; -#ifdef CONFIG_PCI_IOV - /* Enable SRIOV */ - if (hdev->num_req_vfs) { - dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n", - hdev->num_req_vfs); - ret = 
pci_enable_sriov(hdev->pdev, hdev->num_req_vfs); - if (ret) { - hdev->num_alloc_vfs = 0; - dev_err(&pdev->dev, "SRIOV enable failed %d\n", - ret); - return ret; - } - } - hdev->num_alloc_vfs = hdev->num_req_vfs; -#endif + if (IS_ENABLED(CONFIG_PCI_IOV)) + hdev->num_alloc_vfs = hdev->num_req_vfs; for (i = 0; i < num_vport; i++) { vport->back = hdev; @@ -2946,21 +2933,6 @@ static void hclge_service_task(struct work_struct *work) hclge_service_complete(hdev); } -static void hclge_disable_sriov(struct hclge_dev *hdev) -{ - /* If our VFs are assigned we cannot shut down SR-IOV - * without causing issues, so just leave the hardware - * available but disabled - */ - if (pci_vfs_assigned(hdev->pdev)) { - dev_warn(&hdev->pdev->dev, - "disabling driver while VFs are assigned\n"); - return; - } - - pci_disable_sriov(hdev->pdev); -} - struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) { /* VF handle has no client */ @@ -5540,7 +5512,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_map_tqp(hdev); if (ret) { dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); - goto err_sriov_disable; + goto err_msi_irq_uninit; } if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { @@ -5548,7 +5520,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) { dev_err(&hdev->pdev->dev, "mdio config fail ret=%d\n", ret); - goto err_sriov_disable; + goto err_msi_irq_uninit; } } @@ -5612,9 +5584,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) err_mdiobus_unreg: if (hdev->hw.mac.phydev) mdiobus_unregister(hdev->hw.mac.mdio_bus); -err_sriov_disable: - if (IS_ENABLED(CONFIG_PCI_IOV)) - hclge_disable_sriov(hdev); err_msi_irq_uninit: hclge_misc_irq_uninit(hdev); err_msi_uninit: @@ -5717,9 +5686,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_DOWN, &hdev->state); - if (IS_ENABLED(CONFIG_PCI_IOV)) - hclge_disable_sriov(hdev); - if (hdev->service_timer.function) del_timer_sync(&hdev->service_timer); if (hdev->service_task.func) -- cgit v1.2.3 From 3e249d3bed677f18a8a9b87b2b9cf1beaeef886c Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Tue, 15 May 2018 19:20:06 +0100 Subject: net: hns3: Fix for the null pointer problem occurring when initializing ae_dev failed When initializing ae_dev failed during loading hclge.ko, the drvdata will be set to null. When removing hns3.ko, we get a null ae_dev. It causes the null pointer problem. This patch removes pci_set_drvdata from error handle of hclge_init_ae_dev to fix the bug, since pci_set_drvdata has been called in hns3_remove. Also, we do not need to uninit the ae_dev which is not initialized. And it may be the one which is initialized failed. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 6 ++++++ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 5 +---- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 6 ++---- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 1686cebead96..ab2e72ccceb1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -168,6 +168,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + continue; + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); if (!id) continue; @@ -256,6 +259,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + continue; + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); if (!id) continue; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 343197a57240..c2501b1ec819 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5379,7 +5379,7 @@ static int hclge_pci_init(struct hclge_dev *hdev) ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); - goto err_no_drvdata; + return ret; } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); @@ -5417,8 +5417,6 @@ err_clr_master: pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); -err_no_drvdata: - pci_set_drvdata(pdev, NULL); return ret; } @@ -5594,7 +5592,6 @@ err_pci_uninit: pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); out: return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 2dbffcef7109..b7578c695d1c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1563,7 +1563,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); - goto err_no_drvdata; + return ret; } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); @@ -1595,8 +1595,7 @@ err_clr_master: pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); -err_no_drvdata: - pci_set_drvdata(pdev, NULL); + return ret; } @@ -1608,7 +1607,6 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static int hclgevf_init_hdev(struct hclgevf_dev *hdev) -- cgit v1.2.3 From e3afa96365c916319a1068ac50b838b2256cc6df Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Tue, 15 May 2018 19:20:07 +0100 Subject: net: hns3: Add a check for client instance init state If the client instance is initializd failed, we do not need to uninit it. This patch adds a state check to check init state of client instance. 
Fixes: 38caee9d3ee8 ("net: hns3: Add support of the HNAE3 framework") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 15 ++++++++++++--- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1 + 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index ab2e72ccceb1..21cb0c5e5b31 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -50,13 +50,22 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, /* now, (un-)instantiate client by calling lower layer */ if (is_reg) { ret = ae_dev->ops->init_client_instance(client, ae_dev); - if (ret) + if (ret) { dev_err(&ae_dev->pdev->dev, "fail to instantiate client\n"); - return ret; + return ret; + } + + hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 1); + return 0; + } + + if (hnae_get_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B)) { + ae_dev->ops->uninit_client_instance(client, ae_dev); + + hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 0); } - ae_dev->ops->uninit_client_instance(client, ae_dev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 804ea83e1713..ffec2318cf50 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -52,6 +52,7 @@ #define HNAE3_DEV_INITED_B 0x0 #define HNAE3_DEV_SUPPORT_ROCE_B 0x1 #define HNAE3_DEV_SUPPORT_DCB_B 0x2 +#define HNAE3_CLIENT_INITED_B 0x3 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) -- cgit v1.2.3 From 50fbc237b75720ca08d1fd9c6408cfe5c2217bbf Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Tue, 15 May 2018 19:20:08 +0100 Subject: net: hns3: Change return type of hnae3_register_ae_dev If hclge.ko has not been inserted, the value of ret is always zero in hnae3_register_ae_dev. If hclge.ko has been inserted, the value of ret may be zero or non-zero. Having different results for different execution paths is confusing. An ae_dev whose initialization failed can be reinitialized when we remove hclge.ko and insert it again. The case of initializing a client instance is just like the case of initializing an ae_dev. The main job of hnae3_register_ae_dev is adding the ae_dev to the ae_dev list. Because adding an ae_dev always succeeds, we do not need to return anything from this function. This patch changes the return type of hnae3_register_ae_dev from int to void. Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 5 +---- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 +--- 3 files changed, 3 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 21cb0c5e5b31..cb93295c9f62 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -203,7 +203,7 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo); * @ae_dev: the AE device * NOTE: the duplicated name will not be checked */ -int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) +void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) { const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; @@ -224,7 +224,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) if (!ae_dev->ops) { dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); - ret = -EOPNOTSUPP; goto out_err; } @@ -251,8 +250,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) out_err: mutex_unlock(&hnae3_common_lock); - - return ret; } EXPORT_SYMBOL(hnae3_register_ae_dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index ffec2318cf50..ea6e6ead28a1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -515,7 +515,7 @@ struct hnae3_handle { #define hnae_get_bit(origin, shift) \ hnae_get_field((origin), (0x1 << (shift)), (shift)) -int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); +void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index af9e90f87109..ac75b5d6a252 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1576,9 +1576,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ae_dev->dev_type = HNAE3_DEV_KNIC; pci_set_drvdata(pdev, ae_dev); - ret = hnae3_register_ae_dev(ae_dev); - if (ret) - return ret; + hnae3_register_ae_dev(ae_dev); if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) hns3_enable_sriov(pdev); -- cgit v1.2.3 From 854cf33a63106667fea7265b9c222566dbb5edc6 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Tue, 15 May 2018 19:20:09 +0100 Subject: net: hns3: Change return type of hnae3_register_ae_algo An ae_algo is used by many ae_devs and does not belong to just one ae_dev. A failure to initialize an ae_dev does not mean that registering the ae_algo failed. Since registering an ae_algo only adds it to the ae_algo list, an operation that always succeeds, it makes no sense to define the return type as int. This patch changes the return type of hnae3_register_ae_algo from int to void. Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 4 +--- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 4 +++- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 4 +++- 4 files changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index cb93295c9f62..3b1c3966b7d4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -121,7 +121,7 @@ EXPORT_SYMBOL(hnae3_unregister_client); * @ae_algo: AE algorithm * NOTE: the duplicated name will not be checked */ -int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) +void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) { const struct pci_device_id *id; struct hnae3_ae_dev *ae_dev; @@ -160,8 +160,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) } mutex_unlock(&hnae3_common_lock); - - return ret; } EXPORT_SYMBOL(hnae3_register_ae_algo); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index ea6e6ead28a1..2f266ef96582 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -519,7 +519,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); -int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); +void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); void hnae3_unregister_client(struct hnae3_client *client); int hnae3_register_client(struct hnae3_client *client); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c2501b1ec819..d060903f47e0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -6250,7 +6250,9 @@ static int hclge_init(void) { pr_info("%s is initializing\n", HCLGE_NAME); - return hnae3_register_ae_algo(&ae_algo); + hnae3_register_ae_algo(&ae_algo); + + return 0; } static void hclge_exit(void) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index b7578c695d1c..f1f4a17340a5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1852,7 +1852,9 @@ static int hclgevf_init(void) { pr_info("%s is initializing\n", HCLGEVF_NAME); - return hnae3_register_ae_algo(&ae_algovf); + hnae3_register_ae_algo(&ae_algovf); + + return 0; } static void hclgevf_exit(void) -- cgit v1.2.3 From 0c698257c7befa8d1ec1b8d767758c3d73a2686a Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Tue, 15 May 2018 19:20:10 +0100 Subject: net: hns3: Change return value in hnae3_register_client A client includes many client instances. Just like an ae_algo, a failure to initialize a client instance does not mean that registering the client failed. The action of registering a client only adds it to the client list, and that always succeeds. This patch changes the return value of hnae3_register_client from a variable value to a fixed value, making the function always return ok. Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 3b1c3966b7d4..bd3c232234d7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -98,7 +98,7 @@ int hnae3_register_client(struct hnae3_client *client) exit: mutex_unlock(&hnae3_common_lock); - return ret; + return 0; } EXPORT_SYMBOL(hnae3_register_client); -- cgit v1.2.3 From 67bf2541f4b9a091e928d75eca544bca4c5db142 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Tue, 15 May 2018 19:20:11 +0100 Subject: net: hns3: Fixes the back pressure setting when sriov is enabled When sriov is enabled, the Qset and tc mapping is not longer one to one relation. This patch fixes it by mapping all pf and vf's Qset to tc. Fixes: 848440544b41 ("net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 45 +++++++++++++++++++--- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 5 +++ 2 files changed, 45 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index c69ecab460f9..262c125f8137 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -500,7 +500,8 @@ static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode) return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc) +static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id, + u32 bit_map) { struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; struct hclge_desc desc; @@ -511,9 +512,8 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc) bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; bp_to_qs_map_cmd->tc_id = tc; - - /* Qset and tc is one by one mapping */ - bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc); + bp_to_qs_map_cmd->qs_group_id = grp_id; + bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map); return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -1167,6 +1167,41 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) hdev->tm_info.hw_pfc_map); } +/* Each Tc has a 1024 queue sets to backpress, it divides to + * 32 group, each group contains 32 queue sets, which can be + * represented by u32 bitmap. 
+ */ +static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) +{ + struct hclge_vport *vport = hdev->vport; + u32 i, k, qs_bitmap; + int ret; + + for (i = 0; i < HCLGE_BP_GRP_NUM; i++) { + qs_bitmap = 0; + + for (k = 0; k < hdev->num_alloc_vport; k++) { + u16 qs_id = vport->qs_offset + tc; + u8 grp, sub_grp; + + grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M, + HCLGE_BP_GRP_ID_S); + sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, + HCLGE_BP_SUB_GRP_ID_S); + if (i == grp) + qs_bitmap |= (1 << sub_grp); + + vport++; + } + + ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); + if (ret) + return ret; + } + + return 0; +} + static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) { bool tx_en, rx_en; @@ -1218,7 +1253,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret); for (i = 0; i < hdev->tm_info.num_tc; i++) { - ret = hclge_tm_qs_bp_cfg(hdev, i); + ret = hclge_bp_setup_hw(hdev, i); if (ret) return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 2dbe177581e9..c2b6e8a6700f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -89,6 +89,11 @@ struct hclge_pg_shapping_cmd { __le32 pg_shapping_para; }; +#define HCLGE_BP_GRP_NUM 32 +#define HCLGE_BP_SUB_GRP_ID_S 0 +#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0) +#define HCLGE_BP_GRP_ID_S 5 +#define HCLGE_BP_GRP_ID_M GENMASK(9, 5) struct hclge_bp_to_qs_map_cmd { u8 tc_id; u8 rsvd[2]; -- cgit v1.2.3 From be8d8cdb8ebf3afd841c109dd035fd789a0c7d53 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Tue, 15 May 2018 19:20:12 +0100 Subject: net: hns3: Fix for fiber link up problem When hclge_ae_start is called, hdev->hw.mac.link may be set to one after up/down multi-times, which does not correspond to the link state of netdev when the netdev is up. This fixes it by setting hdev->hw.mac.link to zero when hclge_ae_start is called. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d060903f47e0..75d9b8c5793a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3756,6 +3756,7 @@ static int hclge_ae_start(struct hnae3_handle *handle) hclge_cfg_mac_mode(hdev, true); clear_bit(HCLGE_STATE_DOWN, &hdev->state); mod_timer(&hdev->service_timer, jiffies + HZ); + hdev->hw.mac.link = 0; /* reset tqp stats */ hclge_reset_tqp_stats(handle); @@ -3792,7 +3793,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle) /* reset tqp stats */ hclge_reset_tqp_stats(handle); - hclge_update_link_status(hdev); } static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, -- cgit v1.2.3 From fa8d82e853e84de998e91e95d7a09aa04a7f79ee Mon Sep 17 00:00:00 2001 From: Peng Li Date: Tue, 15 May 2018 19:20:13 +0100 Subject: net: hns3: Add support of .sriov_configure in HNS3 driver As HNS3 driver will enable SRIOV default and enable all VFs the HW support, if PF and VF driver compiled to kernel, VF driver will work on host default, it is not right. 
This patch adds support for hns3_driver.sriov_configure to support user configs the VF_num, and do not enable sriov default. Signed-off-by: Peng Li Suggested-by: Zhou Wang Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 58 ++++++++++++++----------- 1 file changed, 32 insertions(+), 26 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index ac75b5d6a252..e85ff3805c96 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1511,29 +1511,6 @@ static bool hns3_is_phys_func(struct pci_dev *pdev) return false; } -static int get_num_req_vfs(struct pci_dev *pdev) -{ - /* a variable vf num will be supported later */ - return pci_sriov_get_totalvfs(pdev); -} - -static void hns3_enable_sriov(struct pci_dev *pdev) -{ - int num_req_vfs = get_num_req_vfs(pdev); - int ret; - - /* Enable SRIOV */ - if (!num_req_vfs) - return; - - dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n", - num_req_vfs); - - ret = pci_enable_sriov(pdev, num_req_vfs); - if (ret) - dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); -} - static void hns3_disable_sriov(struct pci_dev *pdev) { /* If our VFs are assigned we cannot shut down SR-IOV @@ -1578,9 +1555,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hnae3_register_ae_dev(ae_dev); - if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) - hns3_enable_sriov(pdev); - return 0; } @@ -1597,11 +1571,43 @@ static void hns3_remove(struct pci_dev *pdev) hnae3_unregister_ae_dev(ae_dev); } +/** + * hns3_pci_sriov_configure + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * Enable or change the number of VFs. Called when the user updates the number + * of VFs in sysfs. + **/ +int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + int ret; + + if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { + dev_warn(&pdev->dev, "Can not config SRIOV\n"); + return -EINVAL; + } + + if (num_vfs) { + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) + dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); + } else if (!pci_vfs_assigned(pdev)) { + pci_disable_sriov(pdev); + } else { + dev_warn(&pdev->dev, + "Unable to free VFs because some are assigned to VMs.\n"); + } + + return 0; +} + static struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, + .sriov_configure = hns3_pci_sriov_configure, }; /* set default feature to hns3 */ -- cgit v1.2.3 From 6a814413eb9eb76d61de154d4f7b46e2a1558d7a Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Tue, 15 May 2018 19:20:14 +0100 Subject: net: hns3: Fixes the missing PCI iounmap for various legs We call pcim_iomap in hclge_pci_init, pcim_iounmap should be called in error handle of hclge_init_ae_dev. We call pcim_iomap in hclge_pci_init, but do not call pcim_iounmap in hclge_pci_uninit. When we remove the hclge.ko and insert it again, a problem that pci can not map will happen. pcim_iounmap need to be called in hclge_pci_uninit. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 75d9b8c5793a..46435c85b160 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5425,6 +5425,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; + pcim_iounmap(pdev, hdev->hw.io_base); pci_free_irq_vectors(pdev); pci_clear_master(pdev); pci_release_mem_regions(pdev); @@ -5589,6 +5590,7 @@ err_msi_uninit: err_cmd_uninit: hclge_destroy_cmd_queue(&hdev->hw); err_pci_uninit: + pcim_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); -- cgit v1.2.3 From 2578041011c43c38f3742e161cdb99b710d5a9f8 Mon Sep 17 00:00:00 2001 From: Debabrata Banerjee Date: Mon, 14 May 2018 14:48:07 -0400 Subject: bonding: don't queue up extraneous rlb updates arps for incomplete entries can't be sent anyway. Signed-off-by: Debabrata Banerjee Signed-off-by: David S. Miller --- drivers/net/bonding/bond_alb.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index a8f60982d483..9f7031ec56d5 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -421,7 +421,8 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) if (assigned_slave) { rx_hash_table[index].slave = assigned_slave; if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst, - mac_bcast)) { + mac_bcast) && + !is_zero_ether_addr(rx_hash_table[index].mac_dst)) { bond_info->rx_hashtbl[index].ntt = 1; bond_info->rx_ntt = 1; /* A slave has been removed from the @@ -524,7 +525,8 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla client_info = &(bond_info->rx_hashtbl[hash_index]); if ((client_info->slave == slave) && - !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { + !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast) && + !is_zero_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; ntt = 1; } @@ -565,7 +567,8 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) if ((client_info->ip_src == src_ip) && !ether_addr_equal_64bits(client_info->slave->dev->dev_addr, bond->dev->dev_addr) && - !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { + !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast) && + !is_zero_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; bond_info->rx_ntt = 1; } @@ -641,7 +644,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon ether_addr_copy(client_info->mac_src, arp->mac_src); client_info->slave = assigned_slave; - if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { + if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast) && + !is_zero_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; bond->alb_info.rx_ntt = 1; } else { @@ -733,8 +737,10 @@ static void rlb_rebalance(struct bonding *bond) assigned_slave = __rlb_next_rx_slave(bond); if (assigned_slave && (client_info->slave != assigned_slave)) { client_info->slave = assigned_slave; - client_info->ntt = 1; - ntt = 1; + if (!is_zero_ether_addr(client_info->mac_dst)) { + client_info->ntt = 1; + ntt = 1; + } } } -- cgit 
v1.2.3 From cbeeea70de457ce4de89197d72943ab455b4172c Mon Sep 17 00:00:00 2001 From: Debabrata Banerjee Date: Mon, 14 May 2018 14:48:08 -0400 Subject: bonding: use common mac addr checks Replace homegrown mac addr checks with faster defs from etherdevice.h Note that this will also prevent any rlb arp updates for multicast addresses, however this should have been forbidden anyway. Signed-off-by: Debabrata Banerjee Signed-off-by: David S. Miller --- drivers/net/bonding/bond_alb.c | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 9f7031ec56d5..67fd1af1d1de 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -40,11 +40,6 @@ #include #include - - -static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -}; static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 }; @@ -420,9 +415,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) if (assigned_slave) { rx_hash_table[index].slave = assigned_slave; - if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst, - mac_bcast) && - !is_zero_ether_addr(rx_hash_table[index].mac_dst)) { + if (is_valid_ether_addr(rx_hash_table[index].mac_dst)) { bond_info->rx_hashtbl[index].ntt = 1; bond_info->rx_ntt = 1; /* A slave has been removed from the @@ -525,8 +518,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla client_info = &(bond_info->rx_hashtbl[hash_index]); if ((client_info->slave == slave) && - !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast) && - !is_zero_ether_addr(client_info->mac_dst)) { + is_valid_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; ntt = 1; } @@ -567,8 +559,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) if ((client_info->ip_src == src_ip) && !ether_addr_equal_64bits(client_info->slave->dev->dev_addr, bond->dev->dev_addr) && - !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast) && - !is_zero_ether_addr(client_info->mac_dst)) { + is_valid_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; bond_info->rx_ntt = 1; } @@ -596,7 +587,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon if ((client_info->ip_src == arp->ip_src) && (client_info->ip_dst == arp->ip_dst)) { /* the entry is already assigned to this client */ - if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) { + if (!is_broadcast_ether_addr(arp->mac_dst)) { /* update mac address from arp */ ether_addr_copy(client_info->mac_dst, arp->mac_dst); } @@ -644,8 +635,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon ether_addr_copy(client_info->mac_src, arp->mac_src); client_info->slave = assigned_slave; - if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast) && - !is_zero_ether_addr(client_info->mac_dst)) { + if (is_valid_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; bond->alb_info.rx_ntt = 1; } else { @@ -1418,9 +1408,9 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) case ETH_P_IP: { const struct iphdr *iph = ip_hdr(skb); - if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) || - (iph->daddr == ip_bcast) || - (iph->protocol == IPPROTO_IGMP)) { + if (is_broadcast_ether_addr(eth_data->h_dest) || + iph->daddr == ip_bcast || + iph->protocol == IPPROTO_IGMP) { do_tx_balance = false; break; 
} @@ -1432,7 +1422,7 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) /* IPv6 doesn't really use broadcast mac address, but leave * that here just in case. */ - if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) { + if (is_broadcast_ether_addr(eth_data->h_dest)) { do_tx_balance = false; break; } -- cgit v1.2.3 From e79c1055749e3183a2beee04a24da378623329c5 Mon Sep 17 00:00:00 2001 From: Debabrata Banerjee Date: Mon, 14 May 2018 14:48:09 -0400 Subject: bonding: allow use of tx hashing in balance-alb The rx load balancing provided by balance-alb is not mutually exclusive with using hashing for tx selection, and should provide a decent speed increase because this eliminates spinlocks and cache contention. Signed-off-by: Debabrata Banerjee Signed-off-by: David S. Miller --- drivers/net/bonding/bond_alb.c | 20 ++++++++++++++++++-- drivers/net/bonding/bond_main.c | 25 +++++++++++++++---------- drivers/net/bonding/bond_options.c | 2 +- include/net/bonding.h | 11 +++++++++-- 4 files changed, 43 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 67fd1af1d1de..e82108c917a6 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1478,8 +1478,24 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) } if (do_tx_balance) { - hash_index = _simple_hash(hash_start, hash_size); - tx_slave = tlb_choose_channel(bond, hash_index, skb->len); + if (bond->params.tlb_dynamic_lb) { + hash_index = _simple_hash(hash_start, hash_size); + tx_slave = tlb_choose_channel(bond, hash_index, skb->len); + } else { + /* + * do_tx_balance means we are free to select the tx_slave + * So we do exactly what tlb would do for hash selection + */ + + struct bond_up_slave *slaves; + unsigned int count; + + slaves = rcu_dereference(bond->slave_arr); + count = slaves ? READ_ONCE(slaves->count) : 0; + if (likely(count)) + tx_slave = slaves->arr[bond_xmit_hash(bond, skb) % + count]; + } } return bond_do_alb_xmit(skb, bond, tx_slave); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4176e1d95f47..a4cd7f6bfd4d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -159,7 +159,7 @@ module_param(min_links, int, 0); MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier"); module_param(xmit_hash_policy, charp, 0); -MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; " +MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; " "0 for layer 2 (default), 1 for layer 3+4, " "2 for layer 2+3, 3 for encap layer 2+3, " "4 for encap layer 3+4"); @@ -1735,7 +1735,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, unblock_netpoll_tx(); } - if (bond_mode_uses_xmit_hash(bond)) + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); bond->nest_level = dev_get_nest_level(bond_dev); @@ -1870,7 +1870,7 @@ static int __bond_release_one(struct net_device *bond_dev, if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_unbind_slave(slave); - if (bond_mode_uses_xmit_hash(bond)) + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, slave); netdev_info(bond_dev, "Releasing %s interface %s\n", @@ -3102,7 +3102,7 @@ static int bond_slave_netdev_event(unsigned long event, * events. 
If these (miimon/arpmon) parameters are configured * then array gets refreshed twice and that should be fine! */ - if (bond_mode_uses_xmit_hash(bond)) + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); break; case NETDEV_CHANGEMTU: @@ -3322,7 +3322,7 @@ static int bond_open(struct net_device *bond_dev) */ if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB))) return -ENOMEM; - if (bond->params.tlb_dynamic_lb) + if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB) queue_delayed_work(bond->wq, &bond->alb_work, 0); } @@ -3341,7 +3341,7 @@ static int bond_open(struct net_device *bond_dev) bond_3ad_initiate_agg_selection(bond, 1); } - if (bond_mode_uses_xmit_hash(bond)) + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); return 0; @@ -3894,7 +3894,7 @@ err: * to determine the slave interface - * (a) BOND_MODE_8023AD * (b) BOND_MODE_XOR - * (c) BOND_MODE_TLB && tlb_dynamic_lb == 0 + * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0 * * The caller is expected to hold RTNL only and NO other lock! */ @@ -3947,6 +3947,11 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) continue; if (skipslave == slave) continue; + + netdev_dbg(bond->dev, + "Adding slave dev %s to tx hash array[%d]\n", + slave->dev->name, new_arr->count); + new_arr->arr[new_arr->count++] = slave; } @@ -4324,9 +4329,9 @@ static int bond_check_params(struct bond_params *params) } if (xmit_hash_policy) { - if ((bond_mode != BOND_MODE_XOR) && - (bond_mode != BOND_MODE_8023AD) && - (bond_mode != BOND_MODE_TLB)) { + if (bond_mode == BOND_MODE_ROUNDROBIN || + bond_mode == BOND_MODE_ACTIVEBACKUP || + bond_mode == BOND_MODE_BROADCAST) { pr_info("xmit_hash_policy param is irrelevant in mode %s\n", bond_mode_name(bond_mode)); } else { diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 58c705f24f96..8a945c9341d6 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -395,7 +395,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .id = BOND_OPT_TLB_DYNAMIC_LB, .name = "tlb_dynamic_lb", .desc = "Enable dynamic flow shuffling", - .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)), + .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB) | BIT(BOND_MODE_ALB)), .values = bond_tlb_dynamic_lb_tbl, .flags = BOND_OPTFLAG_IFDOWN, .set = bond_option_tlb_dynamic_lb_set, diff --git a/include/net/bonding.h b/include/net/bonding.h index b52235158836..808f1d167349 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -285,8 +285,15 @@ static inline bool bond_needs_speed_duplex(const struct bonding *bond) static inline bool bond_is_nondyn_tlb(const struct bonding *bond) { - return (BOND_MODE(bond) == BOND_MODE_TLB) && - (bond->params.tlb_dynamic_lb == 0); + return (bond_is_lb(bond) && bond->params.tlb_dynamic_lb == 0); +} + +static inline bool bond_mode_can_use_xmit_hash(const struct bonding *bond) +{ + return (BOND_MODE(bond) == BOND_MODE_8023AD || + BOND_MODE(bond) == BOND_MODE_XOR || + BOND_MODE(bond) == BOND_MODE_TLB || + BOND_MODE(bond) == BOND_MODE_ALB); } static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond) -- cgit v1.2.3 From 1386c36b30388f46a95100924bfcae75160db715 Mon Sep 17 00:00:00 2001 From: Debabrata Banerjee Date: Mon, 14 May 2018 14:48:10 -0400 Subject: bonding: allow carrier and link status to determine link state In a mixed environment it may be difficult to tell if your hardware support carrier, if it does not it 
can always report true. With a new use_carrier option of 2, we can check both carrier and link status sequentially, instead of one or the other Signed-off-by: Debabrata Banerjee Signed-off-by: David S. Miller --- Documentation/networking/bonding.txt | 4 ++-- drivers/net/bonding/bond_main.c | 12 ++++++++---- drivers/net/bonding/bond_options.c | 7 ++++--- 3 files changed, 14 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index c13214d073a4..86d07fbb592d 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -828,8 +828,8 @@ use_carrier MII / ETHTOOL ioctl method to determine the link state. A value of 1 enables the use of netif_carrier_ok(), a value of - 0 will use the deprecated MII / ETHTOOL ioctls. The default - value is 1. + 0 will use the deprecated MII / ETHTOOL ioctls. A value of 2 + will check both. The default value is 1. xmit_hash_policy diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a4cd7f6bfd4d..e4c253dc7dfb 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -132,7 +132,7 @@ MODULE_PARM_DESC(downdelay, "Delay before considering link down, " "in milliseconds"); module_param(use_carrier, int, 0); MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " - "0 for off, 1 for on (default)"); + "0 for off, 1 for on (default), 2 for carrier then legacy checks"); module_param(mode, charp, 0); MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, " "1 for active-backup, 2 for balance-xor, " @@ -434,12 +434,16 @@ static int bond_check_dev_link(struct bonding *bond, int (*ioctl)(struct net_device *, struct ifreq *, int); struct ifreq ifr; struct mii_ioctl_data *mii; + bool carrier = true; if (!reporting && !netif_running(slave_dev)) return 0; if (bond->params.use_carrier) - return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; + carrier = netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; + + if (!carrier) + return carrier; /* Try to get link status using Ethtool first. */ if (slave_dev->ethtool_ops->get_link) @@ -4403,8 +4407,8 @@ static int bond_check_params(struct bond_params *params) downdelay = 0; } - if ((use_carrier != 0) && (use_carrier != 1)) { - pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n", + if (use_carrier < 0 || use_carrier > 2) { + pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0-2), so it was set to 1\n", use_carrier); use_carrier = 1; } diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 8a945c9341d6..dba6cef05134 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -164,9 +164,10 @@ static const struct bond_opt_value bond_primary_reselect_tbl[] = { }; static const struct bond_opt_value bond_use_carrier_tbl[] = { - { "off", 0, 0}, - { "on", 1, BOND_VALFLAG_DEFAULT}, - { NULL, -1, 0} + { "off", 0, 0}, + { "on", 1, BOND_VALFLAG_DEFAULT}, + { "both", 2, 0}, + { NULL, -1, 0} }; static const struct bond_opt_value bond_all_slaves_active_tbl[] = { -- cgit v1.2.3 From b3c898e20b1881b0876c3e811c58b039b37dd5fd Mon Sep 17 00:00:00 2001 From: Debabrata Banerjee Date: Wed, 16 May 2018 14:02:13 -0400 Subject: Revert "bonding: allow carrier and link status to determine link state" This reverts commit 1386c36b30388f46a95100924bfcae75160db715. 
We don't want to encourage drivers to not report carrier status correctly, therefore remove this commit. Signed-off-by: Debabrata Banerjee Signed-off-by: David S. Miller --- Documentation/networking/bonding.txt | 4 ++-- drivers/net/bonding/bond_main.c | 12 ++++-------- drivers/net/bonding/bond_options.c | 7 +++---- 3 files changed, 9 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 86d07fbb592d..c13214d073a4 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -828,8 +828,8 @@ use_carrier MII / ETHTOOL ioctl method to determine the link state. A value of 1 enables the use of netif_carrier_ok(), a value of - 0 will use the deprecated MII / ETHTOOL ioctls. A value of 2 - will check both. The default value is 1. + 0 will use the deprecated MII / ETHTOOL ioctls. The default + value is 1. xmit_hash_policy diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e4c253dc7dfb..a4cd7f6bfd4d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -132,7 +132,7 @@ MODULE_PARM_DESC(downdelay, "Delay before considering link down, " "in milliseconds"); module_param(use_carrier, int, 0); MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " - "0 for off, 1 for on (default), 2 for carrier then legacy checks"); + "0 for off, 1 for on (default)"); module_param(mode, charp, 0); MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, " "1 for active-backup, 2 for balance-xor, " @@ -434,16 +434,12 @@ static int bond_check_dev_link(struct bonding *bond, int (*ioctl)(struct net_device *, struct ifreq *, int); struct ifreq ifr; struct mii_ioctl_data *mii; - bool carrier = true; if (!reporting && !netif_running(slave_dev)) return 0; if (bond->params.use_carrier) - carrier = netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; - - if (!carrier) - return carrier; + return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; /* Try to get link status using Ethtool first. */ if (slave_dev->ethtool_ops->get_link) @@ -4407,8 +4403,8 @@ static int bond_check_params(struct bond_params *params) downdelay = 0; } - if (use_carrier < 0 || use_carrier > 2) { - pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0-2), so it was set to 1\n", + if ((use_carrier != 0) && (use_carrier != 1)) { + pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n", use_carrier); use_carrier = 1; } diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index dba6cef05134..8a945c9341d6 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -164,10 +164,9 @@ static const struct bond_opt_value bond_primary_reselect_tbl[] = { }; static const struct bond_opt_value bond_use_carrier_tbl[] = { - { "off", 0, 0}, - { "on", 1, BOND_VALFLAG_DEFAULT}, - { "both", 2, 0}, - { NULL, -1, 0} + { "off", 0, 0}, + { "on", 1, BOND_VALFLAG_DEFAULT}, + { NULL, -1, 0} }; static const struct bond_opt_value bond_all_slaves_active_tbl[] = { -- cgit v1.2.3 From c6213eb1aee308e67377fd1890d84f7284caf531 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 15 May 2018 18:37:25 -0500 Subject: net: ethernet: ti: cpsw-phy-sel: check bus_find_device() ret value This fixes klockworks warnings: Pointer 'dev' returned from call to function 'bus_find_device' at line 179 may be NULL and will be dereferenced at line 181. 
cpsw-phy-sel.c:179: 'dev' is assigned the return value from function 'bus_find_device'. bus.c:342: 'bus_find_device' explicitly returns a NULL value. cpsw-phy-sel.c:181: 'dev' is dereferenced by passing argument 1 to function 'dev_get_drvdata'. device.h:1024: 'dev' is passed to function 'dev_get_drvdata'. device.h:1026: 'dev' is explicitly dereferenced. Signed-off-by: Grygorii Strashko [nsekhar@ti.com: add an error message, fix return path] Signed-off-by: Sekhar Nori Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw-phy-sel.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 18013645e76c..0c1adad7415d 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -177,12 +177,18 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) } dev = bus_find_device(&platform_bus_type, NULL, node, match); - of_node_put(node); + if (!dev) { + dev_err(dev, "unable to find platform device for %pOF\n", node); + goto out; + } + priv = dev_get_drvdata(dev); priv->cpsw_phy_sel(priv, phy_mode, slave); put_device(dev); +out: + of_node_put(node); } EXPORT_SYMBOL_GPL(cpsw_phy_sel); -- cgit v1.2.3 From 00e798c7d1ea5c4514401f17db8300db934291cb Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 15 May 2018 16:56:19 -0700 Subject: drivers: net: Remove device_node checks with of_mdiobus_register() A number of drivers have the following pattern: if (np) of_mdiobus_register() else mdiobus_register() which the implementation of of_mdiobus_register() now takes care of. Remove that pattern in drivers that strictly adhere to it. Signed-off-by: Florian Fainelli Reviewed-by: Grygorii Strashko Reviewed-by: Fugang Duan Reviewed-by: Antoine Tenart Reviewed-by: Jose Abreu Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 8 ++------ drivers/net/dsa/mv88e6xxx/chip.c | 5 +---- drivers/net/ethernet/cadence/macb_main.c | 12 +++--------- drivers/net/ethernet/freescale/fec_main.c | 8 ++------ drivers/net/ethernet/marvell/mvmdio.c | 5 +---- drivers/net/ethernet/renesas/sh_eth.c | 11 +++-------- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 5 +---- drivers/net/ethernet/ti/davinci_mdio.c | 8 +++----- drivers/net/phy/mdio-gpio.c | 6 +----- drivers/net/phy/mdio-mscc-miim.c | 6 +----- drivers/net/usb/lan78xx.c | 7 ++----- 11 files changed, 20 insertions(+), 61 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index ac621f44237a..02e8982519ce 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -450,12 +450,8 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) priv->slave_mii_bus->parent = ds->dev->parent; priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; - if (dn) - err = of_mdiobus_register(priv->slave_mii_bus, dn); - else - err = mdiobus_register(priv->slave_mii_bus); - - if (err) + err = of_mdiobus_register(priv->slave_mii_bus, dn); + if (err && dn) of_node_put(dn); return err; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index b23c11d9f4b2..2bb3f03ee1cb 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2454,10 +2454,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, return err; } - if (np) - err = of_mdiobus_register(bus, np); - else - err = mdiobus_register(bus); + err = of_mdiobus_register(bus, np); if (err) { dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); mv88e6xxx_g2_irq_mdio_free(chip, bus); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index b4c9268100bb..3e93df5d4e3b 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -591,16 +591,10 @@ static int macb_mii_init(struct macb *bp) dev_set_drvdata(&bp->dev->dev, bp->mii_bus); np = bp->pdev->dev.of_node; + if (pdata) + bp->mii_bus->phy_mask = pdata->phy_mask; - if (np) { - err = of_mdiobus_register(bp->mii_bus, np); - } else { - if (pdata) - bp->mii_bus->phy_mask = pdata->phy_mask; - - err = mdiobus_register(bp->mii_bus); - } - + err = of_mdiobus_register(bp->mii_bus, np); if (err) goto err_out_free_mdiobus; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d4604bc8eb5b..f3e43db0d6cb 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2052,13 +2052,9 @@ static int fec_enet_mii_init(struct platform_device *pdev) fep->mii_bus->parent = &pdev->dev; node = of_get_child_by_name(pdev->dev.of_node, "mdio"); - if (node) { - err = of_mdiobus_register(fep->mii_bus, node); + err = of_mdiobus_register(fep->mii_bus, node); + if (node) of_node_put(node); - } else { - err = mdiobus_register(fep->mii_bus); - } - if (err) goto err_out_free_mdiobus; diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 0495487f7b42..c5dac6bd2be4 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -348,10 +348,7 @@ static int orion_mdio_probe(struct platform_device *pdev) goto out_mdio; } - if (pdev->dev.of_node) - ret = of_mdiobus_register(bus, pdev->dev.of_node); - else - ret = mdiobus_register(bus); + ret = of_mdiobus_register(bus, pdev->dev.of_node); if (ret < 
0) { dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); goto out_mdio; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5970d9e5ddf1..8dd41e08a6c6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3025,15 +3025,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp, pdev->name, pdev->id); /* register MDIO bus */ - if (dev->of_node) { - ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); - } else { - if (pd->phy_irq > 0) - mdp->mii_bus->irq[pd->phy] = pd->phy_irq; - - ret = mdiobus_register(mdp->mii_bus); - } + if (pd->phy_irq > 0) + mdp->mii_bus->irq[pd->phy] = pd->phy_irq; + ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); if (ret) goto out_free_bus; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index f5f37bfa1d58..5df1a608e566 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -233,10 +233,7 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->phy_mask = mdio_bus_data->phy_mask; new_bus->parent = priv->device; - if (mdio_node) - err = of_mdiobus_register(new_bus, mdio_node); - else - err = mdiobus_register(new_bus); + err = of_mdiobus_register(new_bus, mdio_node); if (err != 0) { dev_err(dev, "Cannot register the MDIO bus\n"); goto bus_register_fail; diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 98a1c97fb95e..8ac72831af05 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -429,12 +429,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) * defined to support backward compatibility with DTs which assume that * Davinci MDIO will always scan the bus for PHYs detection. 
*/ - if (dev->of_node && of_get_child_count(dev->of_node)) { + if (dev->of_node && of_get_child_count(dev->of_node)) data->skip_scan = true; - ret = of_mdiobus_register(data->bus, dev->of_node); - } else { - ret = mdiobus_register(data->bus); - } + + ret = of_mdiobus_register(data->bus, dev->of_node); if (ret) goto bail_out; diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index b501221819e1..4e4c8daf44c3 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -179,11 +179,7 @@ static int mdio_gpio_probe(struct platform_device *pdev) if (!new_bus) return -ENODEV; - if (pdev->dev.of_node) - ret = of_mdiobus_register(new_bus, pdev->dev.of_node); - else - ret = mdiobus_register(new_bus); - + ret = of_mdiobus_register(new_bus, pdev->dev.of_node); if (ret) mdio_gpio_bus_deinit(&pdev->dev); diff --git a/drivers/net/phy/mdio-mscc-miim.c b/drivers/net/phy/mdio-mscc-miim.c index 8c689ccfdbca..badbc99bedd3 100644 --- a/drivers/net/phy/mdio-mscc-miim.c +++ b/drivers/net/phy/mdio-mscc-miim.c @@ -151,11 +151,7 @@ static int mscc_miim_probe(struct platform_device *pdev) } } - if (pdev->dev.of_node) - ret = of_mdiobus_register(bus, pdev->dev.of_node); - else - ret = mdiobus_register(bus); - + ret = of_mdiobus_register(bus, pdev->dev.of_node); if (ret < 0) { dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); return ret; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 91761436709a..8dff87ec6d99 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1843,12 +1843,9 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) } node = of_get_child_by_name(dev->udev->dev.of_node, "mdio"); - if (node) { - ret = of_mdiobus_register(dev->mdiobus, node); + ret = of_mdiobus_register(dev->mdiobus, node); + if (node) of_node_put(node); - } else { - ret = mdiobus_register(dev->mdiobus); - } if (ret) { netdev_err(dev->net, "can't register MDIO bus\n"); goto exit1; -- cgit v1.2.3 From 1eece799d3f611a28a25319aabbccd4ac948098f Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Tue, 15 May 2018 18:52:00 -0600 Subject: net: qualcomm: rmnet: Capture all drops in transmit path Packets in transmit path could potentially be dropped if there were errors while adding the MAP header or the checksum header. Increment the tx_drops stats in these cases. Additionally, refactor the code to free the packet and increment the tx_drops stat under a single label. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. 
Miller --- .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 6fcd586e9804..7fd86d40a337 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -148,7 +148,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, if (skb_headroom(skb) < required_headroom) { if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) - goto fail; + return -ENOMEM; } if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) @@ -156,17 +156,13 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); if (!map_header) - goto fail; + return -ENOMEM; map_header->mux_id = mux_id; skb->protocol = htons(ETH_P_MAP); return 0; - -fail: - kfree_skb(skb); - return -ENOMEM; } static void @@ -228,15 +224,18 @@ void rmnet_egress_handler(struct sk_buff *skb) mux_id = priv->mux_id; port = rmnet_get_port(skb->dev); - if (!port) { - kfree_skb(skb); - return; - } + if (!port) + goto drop; if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) - return; + goto drop; rmnet_vnd_tx_fixup(skb, orig_dev); dev_queue_xmit(skb); + return; + +drop: + this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); } -- cgit v1.2.3 From bbde32d38bfbbc4a6970498c7470a8a817122735 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Tue, 15 May 2018 18:52:01 -0600 Subject: net: qualcomm: rmnet: Add support for ethtool private stats Add ethtool private stats handler to debug the handling of packets with checksum offload header / trailer. This allows to keep track of the number of packets for which hardware computes the checksum and counts and reasons where checksum computation was skipped in hardware and was done in the network stack. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 13 +++++ .../net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 64 ++++++++++++++++------ drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 51 +++++++++++++++++ 3 files changed, 112 insertions(+), 16 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 0b5b5da80198..34ac45a774e7 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -54,11 +54,24 @@ struct rmnet_pcpu_stats { struct u64_stats_sync syncp; }; +struct rmnet_priv_stats { + u64 csum_ok; + u64 csum_valid_unset; + u64 csum_validation_failed; + u64 csum_err_bad_buffer; + u64 csum_err_invalid_ip_version; + u64 csum_err_invalid_transport; + u64 csum_fragmented_pkt; + u64 csum_skipped; + u64 csum_sw; +}; + struct rmnet_priv { u8 mux_id; struct net_device *real_dev; struct rmnet_pcpu_stats __percpu *pcpu_stats; struct gro_cells gro_cells; + struct rmnet_priv_stats stats; }; struct rmnet_port *rmnet_get_port(struct net_device *real_dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index a6ea09416f8d..57a9c314a665 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -48,7 +48,8 @@ static __sum16 *rmnet_map_get_csum_field(unsigned char protocol, static int rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, - struct rmnet_map_dl_csum_trailer *csum_trailer) + struct rmnet_map_dl_csum_trailer *csum_trailer, + struct rmnet_priv *priv) { __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum; u16 csum_value, csum_value_final; @@ -58,19 +59,25 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, ip4h = (struct iphdr *)(skb->data); if ((ntohs(ip4h->frag_off) & IP_MF) || - ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) + ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) { + priv->stats.csum_fragmented_pkt++; return -EOPNOTSUPP; + } txporthdr = skb->data + ip4h->ihl * 4; csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr); - if (!csum_field) + if (!csum_field) { + priv->stats.csum_err_invalid_transport++; return -EPROTONOSUPPORT; + } /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */ - if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) + if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) { + priv->stats.csum_skipped++; return 0; + } csum_value = ~ntohs(csum_trailer->csum_value); hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl); @@ -102,16 +109,20 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, } } - if (csum_value_final == ntohs((__force __be16)*csum_field)) + if (csum_value_final == ntohs((__force __be16)*csum_field)) { + priv->stats.csum_ok++; return 0; - else + } else { + priv->stats.csum_validation_failed++; return -EINVAL; + } } #if IS_ENABLED(CONFIG_IPV6) static int rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, - struct rmnet_map_dl_csum_trailer *csum_trailer) + struct rmnet_map_dl_csum_trailer *csum_trailer, + struct rmnet_priv *priv) { __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp; u16 csum_value, csum_value_final; @@ -125,8 +136,10 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, txporthdr = skb->data + sizeof(struct ipv6hdr); csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr); - if (!csum_field) + if (!csum_field) { + priv->stats.csum_err_invalid_transport++; 
return -EPROTONOSUPPORT; + } csum_value = ~ntohs(csum_trailer->csum_value); ip6_hdr_csum = (__force __be16) @@ -164,10 +177,13 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, } } - if (csum_value_final == ntohs((__force __be16)*csum_field)) + if (csum_value_final == ntohs((__force __be16)*csum_field)) { + priv->stats.csum_ok++; return 0; - else + } else { + priv->stats.csum_validation_failed++; return -EINVAL; + } } #endif @@ -339,24 +355,34 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, */ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) { + struct rmnet_priv *priv = netdev_priv(skb->dev); struct rmnet_map_dl_csum_trailer *csum_trailer; - if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) + if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) { + priv->stats.csum_sw++; return -EOPNOTSUPP; + } csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len); - if (!csum_trailer->valid) + if (!csum_trailer->valid) { + priv->stats.csum_valid_unset++; return -EINVAL; + } - if (skb->protocol == htons(ETH_P_IP)) - return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer); - else if (skb->protocol == htons(ETH_P_IPV6)) + if (skb->protocol == htons(ETH_P_IP)) { + return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv); + } else if (skb->protocol == htons(ETH_P_IPV6)) { #if IS_ENABLED(CONFIG_IPV6) - return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer); + return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv); #else + priv->stats.csum_err_invalid_ip_version++; return -EPROTONOSUPPORT; #endif + } else { + priv->stats.csum_err_invalid_ip_version++; + return -EPROTONOSUPPORT; + } return 0; } @@ -367,6 +393,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, struct net_device *orig_dev) { + struct rmnet_priv *priv = netdev_priv(orig_dev); struct rmnet_map_ul_csum_header *ul_header; void *iphdr; @@ -389,8 +416,11 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); return; #else + priv->stats.csum_err_invalid_ip_version++; goto sw_csum; #endif + } else { + priv->stats.csum_err_invalid_ip_version++; } } @@ -399,4 +429,6 @@ sw_csum: ul_header->csum_insert_offset = 0; ul_header->csum_enabled = 0; ul_header->udp_ip4_ind = 0; + + priv->stats.csum_sw++; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 2ea16a088de8..cb02e1a015c1 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -152,6 +152,56 @@ static const struct net_device_ops rmnet_vnd_ops = { .ndo_get_stats64 = rmnet_get_stats64, }; +static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = { + "Checksum ok", + "Checksum valid bit not set", + "Checksum validation failed", + "Checksum error bad buffer", + "Checksum error bad ip version", + "Checksum error bad transport", + "Checksum skipped on ip fragment", + "Checksum skipped", + "Checksum computed in software", +}; + +static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &rmnet_gstrings_stats, + sizeof(rmnet_gstrings_stats)); + break; + } +} + +static int rmnet_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rmnet_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static void 
rmnet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_priv_stats *st = &priv->stats; + + if (!data) + return; + + memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64)); +} + +static const struct ethtool_ops rmnet_ethtool_ops = { + .get_ethtool_stats = rmnet_get_ethtool_stats, + .get_strings = rmnet_get_strings, + .get_sset_count = rmnet_get_sset_count, +}; + /* Called by kernel whenever a new rmnet device is created. Sets MTU, * flags, ARP type, needed headroom, etc... */ @@ -170,6 +220,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); rmnet_dev->needs_free_netdev = true; + rmnet_dev->ethtool_ops = &rmnet_ethtool_ops; } /* Exposed API */ -- cgit v1.2.3 From 721ce0f644401c923a266cb084c40ebc58b18f93 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Tue, 15 May 2018 18:52:02 -0600 Subject: net: qualcomm: rmnet: Remove redundant command check The command packet size is already checked once in rmnet_map_deaggregate() for the header, packet and trailer size, so this additional check is not needed. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 78fdad0c6f76..56a93df962e6 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -69,17 +69,9 @@ static void rmnet_map_send_ack(struct sk_buff *skb, struct rmnet_map_control_command *cmd; int xmit_status; - if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { - if (skb->len < sizeof(struct rmnet_map_header) + - RMNET_MAP_GET_LENGTH(skb) + - sizeof(struct rmnet_map_dl_csum_trailer)) { - kfree_skb(skb); - return; - } - - skb_trim(skb, skb->len - - sizeof(struct rmnet_map_dl_csum_trailer)); - } + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + skb_trim(skb, + skb->len - sizeof(struct rmnet_map_dl_csum_trailer)); skb->protocol = htons(ETH_P_MAP); -- cgit v1.2.3 From 9e6881d3665688d14b2ad4860f4e28af4ee02b63 Mon Sep 17 00:00:00 2001 From: Hemanth Puranik Date: Wed, 16 May 2018 06:40:53 +0530 Subject: net: qcom/emac: Encapsulate sgmii ops under one structure This patch introduces ops structure for sgmii, This by ensures that we do not need dummy functions in case of emulation platforms. Signed-off-by: Hemanth Puranik Acked-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 5 +- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 128 ++++++++++++++---------- drivers/net/ethernet/qualcomm/emac/emac-sgmii.h | 32 +++--- drivers/net/ethernet/qualcomm/emac/emac.c | 9 +- 4 files changed, 103 insertions(+), 71 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index d5a32b7c7dc5..092718a03786 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -920,14 +920,13 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, static void emac_adjust_link(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); - struct emac_sgmii *sgmii = &adpt->phy; struct phy_device *phydev = netdev->phydev; if (phydev->link) { emac_mac_start(adpt); - sgmii->link_up(adpt); + emac_sgmii_link_change(adpt, true); } else { - sgmii->link_down(adpt); + emac_sgmii_link_change(adpt, false); emac_mac_stop(adpt); } diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index e8ab512ee7e3..562420b834df 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -53,6 +53,46 @@ #define SERDES_START_WAIT_TIMES 100 +int emac_sgmii_init(struct emac_adapter *adpt) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->init)) + return 0; + + return adpt->phy.sgmii_ops->init(adpt); +} + +int emac_sgmii_open(struct emac_adapter *adpt) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->open)) + return 0; + + return adpt->phy.sgmii_ops->open(adpt); +} + +void emac_sgmii_close(struct emac_adapter *adpt) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->close)) + return; + + adpt->phy.sgmii_ops->close(adpt); +} + +int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->link_change)) + return 0; + + return adpt->phy.sgmii_ops->link_change(adpt, link_state); +} + +void emac_sgmii_reset(struct emac_adapter *adpt) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->reset)) + return; + + adpt->phy.sgmii_ops->reset(adpt); +} + /* Initialize the SGMII link between the internal and external PHYs. 
*/ static void emac_sgmii_link_init(struct emac_adapter *adpt) { @@ -163,21 +203,21 @@ static void emac_sgmii_reset_prepare(struct emac_adapter *adpt) msleep(50); } -void emac_sgmii_reset(struct emac_adapter *adpt) +static void emac_sgmii_common_reset(struct emac_adapter *adpt) { int ret; emac_sgmii_reset_prepare(adpt); emac_sgmii_link_init(adpt); - ret = adpt->phy.initialize(adpt); + ret = emac_sgmii_init(adpt); if (ret) netdev_err(adpt->netdev, "could not reinitialize internal PHY (error=%i)\n", ret); } -static int emac_sgmii_open(struct emac_adapter *adpt) +static int emac_sgmii_common_open(struct emac_adapter *adpt) { struct emac_sgmii *sgmii = &adpt->phy; int ret; @@ -201,43 +241,53 @@ static int emac_sgmii_open(struct emac_adapter *adpt) return 0; } -static int emac_sgmii_close(struct emac_adapter *adpt) +static void emac_sgmii_common_close(struct emac_adapter *adpt) { struct emac_sgmii *sgmii = &adpt->phy; /* Make sure interrupts are disabled */ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); free_irq(sgmii->irq, adpt); - - return 0; } /* The error interrupts are only valid after the link is up */ -static int emac_sgmii_link_up(struct emac_adapter *adpt) +static int emac_sgmii_common_link_change(struct emac_adapter *adpt, bool linkup) { struct emac_sgmii *sgmii = &adpt->phy; int ret; - /* Clear and enable interrupts */ - ret = emac_sgmii_irq_clear(adpt, 0xff); - if (ret) - return ret; + if (linkup) { + /* Clear and enable interrupts */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; - writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + writel(SGMII_ISR_MASK, + sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + } else { + /* Disable interrupts */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + synchronize_irq(sgmii->irq); + } return 0; } -static int emac_sgmii_link_down(struct emac_adapter *adpt) -{ - struct emac_sgmii *sgmii = &adpt->phy; - - /* Disable interrupts */ - writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); - synchronize_irq(sgmii->irq); +static struct sgmii_ops qdf2432_ops = { + .init = emac_sgmii_init_qdf2432, + .open = emac_sgmii_common_open, + .close = emac_sgmii_common_close, + .link_change = emac_sgmii_common_link_change, + .reset = emac_sgmii_common_reset, +}; - return 0; -} +static struct sgmii_ops qdf2400_ops = { + .init = emac_sgmii_init_qdf2400, + .open = emac_sgmii_common_open, + .close = emac_sgmii_common_close, + .link_change = emac_sgmii_common_link_change, + .reset = emac_sgmii_common_reset, +}; static int emac_sgmii_acpi_match(struct device *dev, void *data) { @@ -249,7 +299,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) {} }; const struct acpi_device_id *id = acpi_match_device(match_table, dev); - emac_sgmii_function *initialize = data; + struct sgmii_ops **ops = data; if (id) { acpi_handle handle = ACPI_HANDLE(dev); @@ -270,10 +320,10 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) switch (hrv) { case 1: - *initialize = emac_sgmii_init_qdf2432; + *ops = &qdf2432_ops; return 1; case 2: - *initialize = emac_sgmii_init_qdf2400; + *ops = &qdf2400_ops; return 1; } } @@ -294,14 +344,6 @@ static const struct of_device_id emac_sgmii_dt_match[] = { {} }; -/* Dummy function for systems without an internal PHY. This avoids having - * to check for NULL pointers before calling the functions. 
- */ -static int emac_sgmii_dummy(struct emac_adapter *adpt) -{ - return 0; -} - int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) { struct platform_device *sgmii_pdev = NULL; @@ -312,22 +354,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) if (has_acpi_companion(&pdev->dev)) { struct device *dev; - dev = device_find_child(&pdev->dev, &phy->initialize, + dev = device_find_child(&pdev->dev, &phy->sgmii_ops, emac_sgmii_acpi_match); if (!dev) { dev_warn(&pdev->dev, "cannot find internal phy node\n"); - /* There is typically no internal PHY on emulation - * systems, so if we can't find the node, assume - * we are on an emulation system and stub-out - * support for the internal PHY. These systems only - * use ACPI. - */ - phy->open = emac_sgmii_dummy; - phy->close = emac_sgmii_dummy; - phy->link_up = emac_sgmii_dummy; - phy->link_down = emac_sgmii_dummy; - return 0; } @@ -355,14 +386,9 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) goto error_put_device; } - phy->initialize = (emac_sgmii_function)match->data; + phy->sgmii_ops->init = match->data; } - phy->open = emac_sgmii_open; - phy->close = emac_sgmii_close; - phy->link_up = emac_sgmii_link_up; - phy->link_down = emac_sgmii_link_down; - /* Base address is the first address */ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0); if (!res) { @@ -386,7 +412,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) } } - ret = phy->initialize(adpt); + ret = emac_sgmii_init(adpt); if (ret) goto error; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h index e7c0c3b2baa4..31ba21eb61d2 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h @@ -16,36 +16,44 @@ struct emac_adapter; struct platform_device; -typedef int (*emac_sgmii_function)(struct emac_adapter *adpt); +/** emac_sgmii - internal emac phy + * @init initialization function + * @open called when the driver is opened + * @close called when the driver is closed + * @link_change called when the link state changes + */ +struct sgmii_ops { + int (*init)(struct emac_adapter *adpt); + int (*open)(struct emac_adapter *adpt); + void (*close)(struct emac_adapter *adpt); + int (*link_change)(struct emac_adapter *adpt, bool link_state); + void (*reset)(struct emac_adapter *adpt); +}; /** emac_sgmii - internal emac phy * @base base address * @digital per-lane digital block * @irq the interrupt number * @decode_error_count reference count of consecutive decode errors - * @initialize initialization function - * @open called when the driver is opened - * @close called when the driver is closed - * @link_up called when the link comes up - * @link_down called when the link comes down + * @sgmii_ops sgmii ops */ struct emac_sgmii { void __iomem *base; void __iomem *digital; unsigned int irq; atomic_t decode_error_count; - emac_sgmii_function initialize; - emac_sgmii_function open; - emac_sgmii_function close; - emac_sgmii_function link_up; - emac_sgmii_function link_down; + struct sgmii_ops *sgmii_ops; }; int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt); -void emac_sgmii_reset(struct emac_adapter *adpt); int emac_sgmii_init_fsm9900(struct emac_adapter *adpt); int emac_sgmii_init_qdf2432(struct emac_adapter *adpt); int emac_sgmii_init_qdf2400(struct emac_adapter *adpt); +int emac_sgmii_init(struct emac_adapter *adpt); +int 
emac_sgmii_open(struct emac_adapter *adpt); +void emac_sgmii_close(struct emac_adapter *adpt); +int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state); +void emac_sgmii_reset(struct emac_adapter *adpt); #endif diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 13235baf4766..2a0cbc535a2e 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -253,7 +253,7 @@ static int emac_open(struct net_device *netdev) return ret; } - ret = adpt->phy.open(adpt); + ret = emac_sgmii_open(adpt); if (ret) { emac_mac_rx_tx_rings_free_all(adpt); free_irq(irq->irq, irq); @@ -264,7 +264,7 @@ static int emac_open(struct net_device *netdev) if (ret) { emac_mac_rx_tx_rings_free_all(adpt); free_irq(irq->irq, irq); - adpt->phy.close(adpt); + emac_sgmii_close(adpt); return ret; } @@ -278,7 +278,7 @@ static int emac_close(struct net_device *netdev) mutex_lock(&adpt->reset_lock); - adpt->phy.close(adpt); + emac_sgmii_close(adpt); emac_mac_down(adpt); emac_mac_rx_tx_rings_free_all(adpt); @@ -761,11 +761,10 @@ static void emac_shutdown(struct platform_device *pdev) { struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct emac_adapter *adpt = netdev_priv(netdev); - struct emac_sgmii *sgmii = &adpt->phy; if (netdev->flags & IFF_UP) { /* Closing the SGMII turns off its interrupts */ - sgmii->close(adpt); + emac_sgmii_close(adpt); /* Resetting the MAC turns off all DMA and its interrupts */ emac_mac_reset(adpt); -- cgit v1.2.3 From 93120eba4890e0fcf1e42059d495a7d852db31ed Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 16 May 2018 18:59:19 +0800 Subject: net: stmmac: Remove useless test before clk_disable_unprepare clk_disable_unprepare() already checks that the clock pointer is valid. No need to test it before calling it. Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 13133b30b575..f08625a02cea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1104,30 +1104,20 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) } else { if (bsp_priv->clk_enabled) { if (phy_iface == PHY_INTERFACE_MODE_RMII) { - if (!IS_ERR(bsp_priv->mac_clk_rx)) - clk_disable_unprepare( - bsp_priv->mac_clk_rx); + clk_disable_unprepare(bsp_priv->mac_clk_rx); - if (!IS_ERR(bsp_priv->clk_mac_ref)) - clk_disable_unprepare( - bsp_priv->clk_mac_ref); + clk_disable_unprepare(bsp_priv->clk_mac_ref); - if (!IS_ERR(bsp_priv->clk_mac_refout)) - clk_disable_unprepare( - bsp_priv->clk_mac_refout); + clk_disable_unprepare(bsp_priv->clk_mac_refout); } - if (!IS_ERR(bsp_priv->clk_phy)) - clk_disable_unprepare(bsp_priv->clk_phy); + clk_disable_unprepare(bsp_priv->clk_phy); - if (!IS_ERR(bsp_priv->aclk_mac)) - clk_disable_unprepare(bsp_priv->aclk_mac); + clk_disable_unprepare(bsp_priv->aclk_mac); - if (!IS_ERR(bsp_priv->pclk_mac)) - clk_disable_unprepare(bsp_priv->pclk_mac); + clk_disable_unprepare(bsp_priv->pclk_mac); - if (!IS_ERR(bsp_priv->mac_clk_tx)) - clk_disable_unprepare(bsp_priv->mac_clk_tx); + clk_disable_unprepare(bsp_priv->mac_clk_tx); /** * if (!IS_ERR(bsp_priv->clk_mac)) * clk_disable_unprepare(bsp_priv->clk_mac); -- cgit v1.2.3 From 76e597eb15297d402685ee1442405d3eea70136b Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 16 May 2018 19:18:22 +0800 Subject: net: ethoc: Remove useless test before clk_disable_unprepare clk_disable_unprepare() already checks that the clock pointer is valid. No need to test it before calling it. Signed-off-by: YueHaibing Reviewed-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/ethernet/ethoc.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 8bb0db990c8f..00a57273b753 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1246,8 +1246,7 @@ error: mdiobus_unregister(priv->mdio); mdiobus_free(priv->mdio); free2: - if (priv->clk) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); free: free_netdev(netdev); out: @@ -1271,8 +1270,7 @@ static int ethoc_remove(struct platform_device *pdev) mdiobus_unregister(priv->mdio); mdiobus_free(priv->mdio); } - if (priv->clk) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); unregister_netdev(netdev); free_netdev(netdev); } -- cgit v1.2.3 From 974f6c046ad90d071f96c76ff8ab29355a798c41 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 16 May 2018 14:44:38 +0300 Subject: qed: LL2 flush isles when connection is closed Driver should free all pending isles once it gets a FLUSH cqe from FW. Part of iSCSI out of order flow. Fixes: 1d6cff4fca4366 ("qed: Add iSCSI out of order packet handling") Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c3c1a99a0252..be63f0a2705d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -591,6 +591,27 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) } } +static bool +qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, + struct core_rx_slow_path_cqe *p_cqe) +{ + struct ooo_opaque *iscsi_ooo; + u32 cid; + + if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) + return false; + + iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data; + if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES) + return false; + + /* Need to make a flush */ + cid = le32_to_cpu(iscsi_ooo->cid); + qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid); + + return true; +} + static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { @@ -617,6 +638,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); cqe_type = cqe->rx_cqe_sp.type; + if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH) + if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn, + &cqe->rx_cqe_sp)) + continue; + if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { DP_NOTICE(p_hwfn, "Got a non-regular LB LL2 completion [type 0x%02x]\n", -- cgit v1.2.3 From 6291c608a908a9d62be650373bce7f734311d07c Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 16 May 2018 14:44:39 +0300 Subject: qed: Fix possibility of list corruption during rmmod flows The ll2 flows of flushing the txq/rxq need to be synchronized with the regular fp processing. Caused list corruption during load/unload stress tests. Fixes: 0a7fb11c23c0f ("qed: Add Light L2 support") Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index be63f0a2705d..4a78294d6481 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) struct qed_ll2_tx_packet *p_pkt = NULL; struct qed_ll2_info *p_ll2_conn; struct qed_ll2_tx_queue *p_tx; + unsigned long flags = 0; dma_addr_t tx_frag; p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); @@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) p_tx = &p_ll2_conn->tx_queue; + spin_lock_irqsave(&p_tx->lock, flags); while (!list_empty(&p_tx->active_descq)) { p_pkt = list_first_entry(&p_tx->active_descq, struct qed_ll2_tx_packet, list_entry); @@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + spin_unlock_irqrestore(&p_tx->lock, flags); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; @@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) b_last_frag, b_last_packet); } + spin_lock_irqsave(&p_tx->lock, flags); } + spin_unlock_irqrestore(&p_tx->lock, flags); } static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) @@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_rx_packet *p_pkt = NULL; struct qed_ll2_rx_queue *p_rx; + unsigned long flags = 0; p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); if (!p_ll2_conn) @@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) p_rx = &p_ll2_conn->rx_queue; + spin_lock_irqsave(&p_rx->lock, flags); while (!list_empty(&p_rx->active_descq)) { p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); if (!p_pkt) break; - list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); + spin_unlock_irqrestore(&p_rx->lock, flags); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; @@ -588,7 +595,9 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) cookie, rx_buf_addr, b_last); } + spin_lock_irqsave(&p_rx->lock, flags); } + spin_unlock_irqrestore(&p_rx->lock, flags); } static bool -- cgit v1.2.3 From fc16f56b7bfd5bfe812ba2cd3a4c8943977e8306 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 16 May 2018 14:44:40 +0300 Subject: qed: Fix LL2 race during connection terminate Stress on qedi/qedr load unload lead to list_del corruption. This is due to ll2 connection terminate freeing resources without verifying that no more ll2 processing will occur. This patch unregisters the ll2 status block before terminating the connection to assure this race does not occur. Fixes: 1d6cff4fca4366 ("qed: Add iSCSI out of order packet handling") Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 4a78294d6481..c97ebd681c47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -829,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; int rc; + if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) + return 0; + rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); if (rc) return rc; @@ -849,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) u16 new_idx = 0, num_bds = 0; int rc; + if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) + return 0; + new_idx = le16_to_cpu(*p_tx->p_fw_cons); num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); @@ -1915,17 +1921,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) /* Stop Tx & Rx of connection, if needed */ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { + p_ll2_conn->tx_queue.b_cb_registred = false; + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); if (rc) goto out; + qed_ll2_txq_flush(p_hwfn, connection_handle); + qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); } if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { + p_ll2_conn->rx_queue.b_cb_registred = false; + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); if (rc) goto out; + qed_ll2_rxq_flush(p_hwfn, connection_handle); + qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); } if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) @@ -1974,16 +1988,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle) if (!p_ll2_conn) return; - if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->rx_queue.b_cb_registred = false; - qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); - } - - if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->tx_queue.b_cb_registred = false; - qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); - } - kfree(p_ll2_conn->tx_queue.descq_mem); qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); -- cgit v1.2.3 From 8e725f7caafb8e820e05707fe9853023375438cf Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Wed, 16 May 2018 19:51:15 +0530 Subject: cxgb4: update LE-TCAM collection for T6 For T6, clip table is separated from main TCAM. So, update LE-TCAM collection logic to collect clip table TCAM as well. IPv6 takes 4 entries in clip table TCAM compared to 2 entries in main TCAM. Also, in case of errors, keep LE-TCAM collected so far and set the status to partial dump. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 3 ++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 40 ++++++++++++++++++----- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 1 + 4 files changed, 37 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index b57acb8dc35b..740a18ba4229 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -235,6 +235,9 @@ struct cudbg_vpd_data { }; #define CUDBG_MAX_TCAM_TID 0x800 +#define CUDBG_T6_CLIP 1536 +#define CUDBG_MAX_TID_COMP_EN 6144 +#define CUDBG_MAX_TID_COMP_DIS 3072 enum cudbg_le_entry_types { LE_ET_UNKNOWN = 0, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 8568a51f6414..215fe6260fd7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -24,6 +24,7 @@ #define CUDBG_STATUS_NOT_IMPLEMENTED -28 #define CUDBG_SYSTEM_ERROR -29 #define CUDBG_STATUS_CCLK_NOT_DEFINED -32 +#define CUDBG_STATUS_PARTIAL_DATA -41 #define CUDBG_MAJOR_VERSION 1 #define CUDBG_MINOR_VERSION 14 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 9da6f57901a9..4feb7eca0acf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -2366,8 +2366,11 @@ void cudbg_fill_le_tcam_info(struct adapter *padap, value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A); tcam_region->routing_start = value; - /*Get clip table index */ - value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A); + /* Get clip table index. For T6 there is separate CLIP TCAM */ + if (is_t6(padap->params.chip)) + value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A); + else + value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A); tcam_region->clip_start = value; /* Get filter table index */ @@ -2392,8 +2395,16 @@ void cudbg_fill_le_tcam_info(struct adapter *padap, tcam_region->tid_hash_base; } } else { /* hash not enabled */ - tcam_region->max_tid = CUDBG_MAX_TCAM_TID; + if (is_t6(padap->params.chip)) + tcam_region->max_tid = (value & ASLIPCOMPEN_F) ? + CUDBG_MAX_TID_COMP_EN : + CUDBG_MAX_TID_COMP_DIS; + else + tcam_region->max_tid = CUDBG_MAX_TCAM_TID; } + + if (is_t6(padap->params.chip)) + tcam_region->max_tid += CUDBG_T6_CLIP; } int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, @@ -2423,18 +2434,31 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, for (i = 0; i < tcam_region.max_tid; ) { rc = cudbg_read_tid(pdbg_init, i, tid_data); if (rc) { - cudbg_err->sys_err = rc; - cudbg_put_buff(pdbg_init, &temp_buff); - return rc; + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + /* Update tcam header and exit */ + tcam_region.max_tid = i; + memcpy(temp_buff.data, &tcam_region, + sizeof(struct cudbg_tcam)); + goto out; } - /* ipv6 takes two tids */ - cudbg_is_ipv6_entry(tid_data, tcam_region) ? 
i += 2 : i++; + if (cudbg_is_ipv6_entry(tid_data, tcam_region)) { + /* T6 CLIP TCAM: ipv6 takes 4 entries */ + if (is_t6(padap->params.chip) && + i >= tcam_region.clip_start && + i < tcam_region.clip_start + CUDBG_T6_CLIP) + i += 4; + else /* Main TCAM: ipv6 takes two tids */ + i += 2; + } else { + i++; + } tid_data++; bytes += sizeof(struct cudbg_tid_data); } +out: return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 843f8cada1de..6b55aa2eb2a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -2999,6 +2999,7 @@ #define LE_DB_HASH_TID_BASE_A 0x19c30 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30 #define LE_DB_INT_CAUSE_A 0x19c3c +#define LE_DB_CLCAM_TID_BASE_A 0x19df4 #define LE_DB_TID_HASHBASE_A 0x19df8 #define T6_LE_DB_HASH_TID_BASE_A 0x19df8 -- cgit v1.2.3 From a5898e97f01acfbcafe9e90196b1c811a46ebf44 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 16 May 2018 17:46:45 -0700 Subject: net/mlx5: Vport, Use 'kvfree()' for memory allocated by 'kvzalloc()' When 'kvzalloc()' is used to allocate memory, 'kvfree()' must be used to free it. Fixes: 9efa75254593d ("net/mlx5_core: Introduce access functions to query vport RoCE fields") Signed-off-by: Christophe JAILLET Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 177e076b8d17..719cecb182c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -511,7 +511,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out, nic_vport_context.system_image_guid); - kfree(out); + kvfree(out); return 0; } @@ -531,7 +531,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) *node_guid = MLX5_GET64(query_nic_vport_context_out, out, nic_vport_context.node_guid); - kfree(out); + kvfree(out); return 0; } @@ -587,7 +587,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out, nic_vport_context.qkey_violation_counter); - kfree(out); + kvfree(out); return 0; } -- cgit v1.2.3 From e574978ae52a3fb1346bc37de2f2221dd878c065 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 16 May 2018 17:49:01 -0700 Subject: net/mlx5: Eswitch, Use 'kvfree()' for memory allocated by 'kvzalloc()' When 'kvzalloc()' is used to allocate memory, 'kvfree()' must be used to free it. 
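A minimal sketch of the required pairing (illustrative only; the function and variable names below are placeholders, not code from the driver):

    static int example_query(int outlen)
    {
            void *out;

            /* kvzalloc() may fall back to vmalloc() for large buffers */
            out = kvzalloc(outlen, GFP_KERNEL);
            if (!out)
                    return -ENOMEM;

            /* ... fill and parse the buffer ... */

            kvfree(out);    /* handles both kmalloc and vmalloc backing */
            return 0;
    }

Freeing such a buffer with plain kfree() is only safe while the allocation happens to come from the slab allocator, which is why both mlx5 fixes switch the call sites to kvfree().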
Fixes: fed9ce22bf8ae ("net/mlx5: E-Switch, Add API to create vport rx rules") Signed-off-by: Christophe JAILLET Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 35e256eb2f6e..b123f8a52ad8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -663,7 +663,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) esw->offloads.vport_rx_group = g; out: - kfree(flow_group_in); + kvfree(flow_group_in); return err; } -- cgit v1.2.3 From 9cfbfa701b55868cda4d638164887d5c74c7bfdd Mon Sep 17 00:00:00 2001 From: Cathy Zhou Date: Fri, 13 Apr 2018 11:28:37 -0700 Subject: ixgbe: cleanup sparse warnings Sparse complains valid conversions between restricted types, force attribute is used to avoid those warnings. Signed-off-by: Cathy Zhou Reviewed-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 13 ++++++----- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 25 +++++++++++++-------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 29 +++++++++++++++---------- drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 16 +++++++------- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 9 ++++---- 7 files changed, 55 insertions(+), 41 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 42f63b943ea0..1e49716f52bc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1436,7 +1436,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; - u32 bucket_hash = 0, hi_dword = 0; + u32 bucket_hash = 0; + __be32 hi_dword = 0; int i; /* Apply masks to input data */ @@ -1475,7 +1476,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, * Limit hash to 13 bits since max bucket count is 8K. * Store result at the end of the input stream. 
*/ - input->formatted.bkt_hash = bucket_hash & 0x1FFF; + input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF); } /** @@ -1584,7 +1585,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, return IXGBE_ERR_CONFIG; } - switch (input_mask->formatted.flex_bytes & 0xFFFF) { + switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { case 0x0000: /* Mask Flex Bytes */ fdirm |= IXGBE_FDIRM_FLEX; @@ -1654,13 +1655,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); /* record vlan (little-endian) and flex_bytes(big-endian) */ - fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); + fdirvlan = IXGBE_STORE_AS_BE16((__force u16)input->formatted.flex_bytes); fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; fdirvlan |= ntohs(input->formatted.vlan_id); IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); /* configure FDIRHASH register */ - fdirhash = input->formatted.bkt_hash; + fdirhash = (__force u32)input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); @@ -1698,7 +1699,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, s32 err; /* configure FDIRHASH register */ - fdirhash = input->formatted.bkt_hash; + fdirhash = (__force u32)input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 8d038837f72b..3f5c350716bb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3626,7 +3626,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, cpu_to_le32(buffer[i])); + i, (__force u32)cpu_to_le32(buffer[i])); /* Setting this bit tells the ARC that a new command is pending. 
*/ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index b5219e024f70..94b3165ff543 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -440,7 +440,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); - ddp->err = ddp_err; + ddp->err = (__force u32)ddp_err; ddp->sgl = NULL; ddp->sgc = 0; /* fall through */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 195c0b65eee2..99b170f1efd1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -19,8 +19,9 @@ static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx, int i; for (i = 0; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i])); - IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt)); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), + (__force u32)cpu_to_be32(key[3 - i])); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt)); IXGBE_WRITE_FLUSH(hw); reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX); @@ -69,7 +70,8 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi, int i; /* store the SPI (in bigendian) and IPidx */ - IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi)); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, + (__force u32)cpu_to_le32((__force u32)spi)); IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx); IXGBE_WRITE_FLUSH(hw); @@ -77,8 +79,9 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi, /* store the key, salt, and mode */ for (i = 0; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i])); - IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt)); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), + (__force u32)cpu_to_be32(key[3 - i])); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt)); IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode); IXGBE_WRITE_FLUSH(hw); @@ -97,7 +100,8 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[]) /* store the ip address */ for (i = 0; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i])); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), + (__force u32)cpu_to_le32((__force u32)addr[i])); IXGBE_WRITE_FLUSH(hw); ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl); @@ -367,7 +371,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, struct xfrm_state *ret = NULL; rcu_read_lock(); - hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi) + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, + (__force u32)spi) { if (spi == rsa->xs->id.spi && ((ip4 && *daddr == rsa->xs->id.daddr.a4) || (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, @@ -377,6 +382,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, xfrm_state_hold(ret); break; } + } rcu_read_unlock(); return ret; } @@ -569,7 +575,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) /* hash the new entry for faster search in Rx path */ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, - rsa.xs->id.spi); + (__force u64)rsa.xs->id.spi); } else { struct tx_sa tsa; @@ -653,7 +659,8 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) if (!ipsec->ip_tbl[ipi].ref_cnt) { memset(&ipsec->ip_tbl[ipi], 0, 
sizeof(struct rx_ip_sa)); - ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf); + ixgbe_ipsec_set_rx_ip(hw, ipi, + (__force __be32 *)zerobuf); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6652b201df5b..163b34a9572d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -727,8 +727,8 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) ring_desc = ""; pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), + le64_to_cpu((__force __le64)u0->a), + le64_to_cpu((__force __le64)u0->b), (u64)dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), tx_buffer->next_to_watch, @@ -839,15 +839,15 @@ rx_ring_summary: /* Descriptor Done */ pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), + le64_to_cpu((__force __le64)u0->a), + le64_to_cpu((__force __le64)u0->b), rx_buffer_info->skb, ring_desc); } else { pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), + le64_to_cpu((__force __le64)u0->a), + le64_to_cpu((__force __le64)u0->b), (u64)rx_buffer_info->dma, rx_buffer_info->skb, ring_desc); @@ -7751,7 +7751,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; @@ -9104,7 +9104,8 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, for (j = 0; field_ptr[j].val; j++) { if (field_ptr[j].off == off) { - field_ptr[j].val(input, mask, val, m); + field_ptr[j].val(input, mask, (__force u32)val, + (__force u32)m); input->filter.formatted.flow_type |= field_ptr[j].type; found_entry = true; @@ -9113,8 +9114,10 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, } if (nexthdr) { if (nexthdr->off == cls->knode.sel->keys[i].off && - nexthdr->val == cls->knode.sel->keys[i].val && - nexthdr->mask == cls->knode.sel->keys[i].mask) + nexthdr->val == + (__force u32)cls->knode.sel->keys[i].val && + nexthdr->mask == + (__force u32)cls->knode.sel->keys[i].mask) found_jump_field = true; else continue; @@ -9218,7 +9221,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, for (i = 0; nexthdr[i].jump; i++) { if (nexthdr[i].o != cls->knode.sel->offoff || nexthdr[i].s != cls->knode.sel->offshift || - nexthdr[i].m != cls->knode.sel->offmask) + nexthdr[i].m != + (__force u32)cls->knode.sel->offmask) return err; jump = kzalloc(sizeof(*jump), GFP_KERNEL); @@ -9991,7 +9995,8 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) } } else { for (i = 0; i < adapter->num_rx_queues; i++) - xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); + (void)xchg(&adapter->rx_ring[i]->xdp_prog, + adapter->xdp_prog); } if (old_prog) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index fcae520647ce..1e6cf220f543 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -29,8 +29,8 @@ static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) { - input->filter.formatted.src_ip[0] = val; - mask->formatted.src_ip[0] 
= m; + input->filter.formatted.src_ip[0] = (__force __be32)val; + mask->formatted.src_ip[0] = (__force __be32)m; return 0; } @@ -38,8 +38,8 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) { - input->filter.formatted.dst_ip[0] = val; - mask->formatted.dst_ip[0] = m; + input->filter.formatted.dst_ip[0] = (__force __be32)val; + mask->formatted.dst_ip[0] = (__force __be32)m; return 0; } @@ -55,10 +55,10 @@ static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) { - input->filter.formatted.src_port = val & 0xffff; - mask->formatted.src_port = m & 0xffff; - input->filter.formatted.dst_port = val >> 16; - mask->formatted.dst_port = m >> 16; + input->filter.formatted.src_port = (__force __be16)(val & 0xffff); + mask->formatted.src_port = (__force __be16)(m & 0xffff); + input->filter.formatted.dst_port = (__force __be16)(val >> 16); + mask->formatted.dst_port = (__force __be16)(m >> 16); return 0; }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ac71ed76a54b..a8148c7126e5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -878,8 +878,9 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ - buffer.address = cpu_to_be32((offset + current_word) * 2); - buffer.length = cpu_to_be16(words_to_read * 2); + buffer.address = (__force u32)cpu_to_be32((offset + + current_word) * 2); + buffer.length = (__force u16)cpu_to_be16(words_to_read * 2); buffer.pad2 = 0; buffer.pad3 = 0; @@ -1089,9 +1090,9 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ - buffer.address = cpu_to_be32(offset * 2); + buffer.address = (__force u32)cpu_to_be32(offset * 2); /* one word */ - buffer.length = cpu_to_be16(sizeof(u16)); + buffer.length = (__force u16)cpu_to_be16(sizeof(u16)); status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) -- cgit v1.2.3 From b212d815e77c72be921979119c715166cc8987b1 Mon Sep 17 00:00:00 2001 From: Mauro S M Rodrigues Date: Wed, 2 May 2018 17:26:28 -0300 Subject: ixgbe/ixgbevf: Free IRQ when PCI error recovery removes the device Since commit f7f37e7ff2b9 ("ixgbe: handle close/suspend race with netif_device_detach/present") ixgbe_close_suspend is called, from ixgbe_close, only if the device is present, i.e. if it isn't detached. That exposed a situation where IRQs weren't freed if a PCI error recovery system opts to remove the device. For such case the pci channel state is set to pci_channel_io_perm_failure and ixgbe_io_error_detected was returning PCI_ERS_RESULT_DISCONNECT before calling ixgbe_close_suspend consequentially not freeing IRQ and crashing when the remove handler calls pci_disable_device, hitting a BUG_ON at free_msi_irqs, which asserts that there is no non-free IRQ associated with the device to be removed: BUG_ON(irq_has_action(entry->irq + i)); The issue is fixed by calling the ixgbe_close_suspend before evaluate the pci channel state. 
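Condensed from the ixgbe hunk that follows (the ixgbevf variant and the remaining error paths are identical in spirit), the resulting ordering in the .error_detected handler is roughly:

    rtnl_lock();
    netif_device_detach(netdev);

    /* close first so the IRQs are freed even on a permanent failure */
    if (netif_running(netdev))
            ixgbe_close_suspend(adapter);

    if (state == pci_channel_io_perm_failure) {
            rtnl_unlock();
            return PCI_ERS_RESULT_DISCONNECT;
    }
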
Reported-by: Naresh Bannoth Reported-by: Abdul Haleem Signed-off-by: Mauro S M Rodrigues Reviewed-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 +++--- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 163b34a9572d..a52d92e182ee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10935,14 +10935,14 @@ skip_bad_vf_detection: rtnl_lock(); netif_device_detach(netdev); + if (netif_running(netdev)) + ixgbe_close_suspend(adapter); + if (state == pci_channel_io_perm_failure) { rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; } - if (netif_running(netdev)) - ixgbe_close_suspend(adapter); - if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); rtnl_unlock(); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 1ccce6cd51fc..9a939dcaf727 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4747,14 +4747,14 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, rtnl_lock(); netif_device_detach(netdev); + if (netif_running(netdev)) + ixgbevf_close_suspend(adapter); + if (state == pci_channel_io_perm_failure) { rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; } - if (netif_running(netdev)) - ixgbevf_close_suspend(adapter); - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) pci_disable_device(pdev); rtnl_unlock(); -- cgit v1.2.3 From 6710f970d9979d8f03f6e292bb729b2ee1526d0e Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 2 May 2018 23:59:30 -0400 Subject: ixgbe: release lock for the duration of ixgbe_suspend_close() Currently, during device_shutdown() ixgbe holds rtnl_lock for the duration of lengthy ixgbe_close_suspend(). On machines with multiple ixgbe cards this lock prevents scaling if device_shutdown() function is multi-threaded. It is not necessary to hold this lock during ixgbe_close_suspend() as it is not held when ixgbe_close() is called also during shutdown but for kexec case. Signed-off-by: Pavel Tatashin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a52d92e182ee..5ddfb93ed491 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6698,8 +6698,15 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) rtnl_lock(); netif_device_detach(netdev); - if (netif_running(netdev)) + if (netif_running(netdev)) { + /* Suspend takes a long time, device_shutdown may be + * parallelized this function, so drop lock for the + * duration of this call. 
+ */ + rtnl_unlock(); ixgbe_close_suspend(adapter); + rtnl_lock(); + } ixgbe_clear_interrupt_scheme(adapter); rtnl_unlock(); -- cgit v1.2.3 From a8d9bb3d448f3c9a26a885a3104738098a6625c7 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Mon, 14 May 2018 11:16:11 -0700 Subject: ixgbe: force VF to grab new MAC on driver reload Do not validate the MAC address during a reset, unless the MAC was set on the host. This way the VF will get a new MAC address every time it reloads. Remove the "no MAC address assigned" message since it will get spammed on reset and it doesn't help much as the MAC on the VF is randomly generated. Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 2649c06d877b..6f59933cdff7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -854,14 +854,11 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET; - if (!is_zero_ether_addr(vf_mac)) { + if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) { msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; memcpy(addr, vf_mac, ETH_ALEN); } else { msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; - dev_warn(&adapter->pdev->dev, - "VF %d has no MAC address assigned, you may have to assign one manually\n", - vf); } /* -- cgit v1.2.3 From 6e7d0ba1e59b1a306761a731e67634c0f2efea2a Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Mon, 14 May 2018 11:16:16 -0700 Subject: ixgbevf: fix MAC address changes through ixgbevf_set_mac() Set hw->mac.perm_addr in ixgbevf_set_mac() in order to avoid losing the custom MAC on reset. This can happen in the following case: >ip link set $vf address $mac >ethtool -r $vf Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 9a939dcaf727..083041129539 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4164,6 +4164,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) return -EPERM; ether_addr_copy(hw->mac.addr, addr->sa_data); + ether_addr_copy(hw->mac.perm_addr, addr->sa_data); ether_addr_copy(netdev->dev_addr, addr->sa_data); return 0; -- cgit v1.2.3 From 43c89b16427f97607cdc9a64fe2a84935568af64 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Mon, 16 Apr 2018 09:55:36 -0700 Subject: ice: Update NVM AQ command functions This patch updates the NVM read/erase/update AQ commands to align with the latest specification. 
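Condensed from the ice_aq_read_nvm() hunk that follows, the updated descriptor layout splits the 24-bit NVM offset across two fields (a sketch of the relevant assignments only):

    cmd->module_typeid = cpu_to_le16(module_typeid);
    cmd->offset_low    = cpu_to_le16(offset & 0xFFFF);   /* low 16 bits */
    cmd->offset_high   = (offset >> 16) & 0xFF;          /* high 8 bits */
    cmd->length        = cpu_to_le16(length);
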
Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 13 +++++++------ drivers/net/ethernet/intel/ice/ice_nvm.c | 7 ++++--- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 7dc5f045e969..7541ec2270b3 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1049,7 +1049,9 @@ struct ice_aqc_set_event_mask { * NVM Update commands (indirect 0x0703) */ struct ice_aqc_nvm { - u8 cmd_flags; + __le16 offset_low; + u8 offset_high; + u8 cmd_flags; #define ICE_AQC_NVM_LAST_CMD BIT(0) #define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */ #define ICE_AQC_NVM_PRESERVATION_S 1 @@ -1058,12 +1060,11 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_PRESERVE_ALL BIT(1) #define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) - u8 module_typeid; - __le16 length; + __le16 module_typeid; + __le16 length; #define ICE_AQC_NVM_ERASE_LEN 0xFFFF - __le32 offset; - __le32 addr_high; - __le32 addr_low; + __le32 addr_high; + __le32 addr_low; }; /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index fa7a69ac92b0..92da0a626ce0 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -16,7 +16,7 @@ * Read the NVM using the admin queue commands (0x0701) */ static enum ice_status -ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length, +ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, struct ice_sq_cd *cd) { struct ice_aq_desc desc; @@ -33,8 +33,9 @@ ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length, /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; - cmd->module_typeid = module_typeid; - cmd->offset = cpu_to_le32(offset); + cmd->module_typeid = cpu_to_le16(module_typeid); + cmd->offset_low = cpu_to_le16(offset & 0xFFFF); + cmd->offset_high = (offset >> 16) & 0xFF; cmd->length = cpu_to_le16(length); return ice_aq_send_cmd(hw, &desc, data, length, cd); -- cgit v1.2.3 From 9611d6d6e24cd40ff887bdbb4dfe36a2ee88d488 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 17 May 2018 01:21:45 +0300 Subject: net: ethernet: ti: cpsw: disable mq feature for "AM33xx ES1.0" devices The early versions of am33xx devices, related to ES1.0 SoC revision have errata limiting mq support. That's the same errata as commit 7da1160002f1 ("drivers: net: cpsw: add am335x errata workarround for interrutps") AM33xx Errata [1] Advisory 1.0.9 http://www.ti.com/lit/er/sprz360f/sprz360f.pdf After additional investigation were found that drivers w/a is propagated on all AM33xx SoCs and on DM814x. But the errata exists only for ES1.0 of AM33xx family, limiting mq support for revisions after ES1.0. So, disable mq support only for related SoCs and use separate polls for revisions allowing mq. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 109 +++++++++++++++++++++++------------------ 1 file changed, 60 insertions(+), 49 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 28d893b93d30..a7285dddfd29 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -36,6 +36,7 @@ #include #include #include +#include #include @@ -957,7 +958,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) +static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget) { u32 ch_map; int num_tx, cur_budget, ch; @@ -984,7 +985,21 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) if (num_tx < budget) { napi_complete(napi_tx); writel(0xff, &cpsw->wr_regs->tx_en); - if (cpsw->quirk_irq && cpsw->tx_irq_disabled) { + } + + return num_tx; +} + +static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); + int num_tx; + + num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget); + if (num_tx < budget) { + napi_complete(napi_tx); + writel(0xff, &cpsw->wr_regs->tx_en); + if (cpsw->tx_irq_disabled) { cpsw->tx_irq_disabled = false; enable_irq(cpsw->irqs_table[1]); } @@ -993,7 +1008,7 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) return num_tx; } -static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) +static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget) { u32 ch_map; int num_rx, cur_budget, ch; @@ -1020,7 +1035,21 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) if (num_rx < budget) { napi_complete_done(napi_rx, num_rx); writel(0xff, &cpsw->wr_regs->rx_en); - if (cpsw->quirk_irq && cpsw->rx_irq_disabled) { + } + + return num_rx; +} + +static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); + int num_rx; + + num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget); + if (num_rx < budget) { + napi_complete_done(napi_rx, num_rx); + writel(0xff, &cpsw->wr_regs->rx_en); + if (cpsw->rx_irq_disabled) { cpsw->rx_irq_disabled = false; enable_irq(cpsw->irqs_table[0]); } @@ -2364,9 +2393,9 @@ static void cpsw_get_channels(struct net_device *ndev, { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; + ch->max_tx = cpsw->quirk_irq ? 
1 : CPSW_MAX_QUEUES; ch->max_combined = 0; - ch->max_rx = CPSW_MAX_QUEUES; - ch->max_tx = CPSW_MAX_QUEUES; ch->max_other = 0; ch->other_count = 0; ch->rx_count = cpsw->rx_ch_num; @@ -2377,6 +2406,11 @@ static void cpsw_get_channels(struct net_device *ndev, static int cpsw_check_ch_settings(struct cpsw_common *cpsw, struct ethtool_channels *ch) { + if (cpsw->quirk_irq) { + dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed"); + return -EOPNOTSUPP; + } + if (ch->combined_count) return -EINVAL; @@ -2917,44 +2951,20 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) return ret; } -#define CPSW_QUIRK_IRQ BIT(0) - -static const struct platform_device_id cpsw_devtype[] = { - { - /* keep it for existing comaptibles */ - .name = "cpsw", - .driver_data = CPSW_QUIRK_IRQ, - }, { - .name = "am335x-cpsw", - .driver_data = CPSW_QUIRK_IRQ, - }, { - .name = "am4372-cpsw", - .driver_data = 0, - }, { - .name = "dra7-cpsw", - .driver_data = 0, - }, { - /* sentinel */ - } -}; -MODULE_DEVICE_TABLE(platform, cpsw_devtype); - -enum ti_cpsw_type { - CPSW = 0, - AM335X_CPSW, - AM4372_CPSW, - DRA7_CPSW, -}; - static const struct of_device_id cpsw_of_mtable[] = { - { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], }, - { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], }, - { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], }, - { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], }, + { .compatible = "ti,cpsw"}, + { .compatible = "ti,am335x-cpsw"}, + { .compatible = "ti,am4372-cpsw"}, + { .compatible = "ti,dra7-cpsw"}, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, cpsw_of_mtable); +static const struct soc_device_attribute cpsw_soc_devices[] = { + { .family = "AM33xx", .revision = "ES1.0"}, + { /* sentinel */ } +}; + static int cpsw_probe(struct platform_device *pdev) { struct clk *clk; @@ -2966,9 +2976,9 @@ static int cpsw_probe(struct platform_device *pdev) void __iomem *ss_regs; void __iomem *cpts_regs; struct resource *res, *ss_res; - const struct of_device_id *of_id; struct gpio_descs *mode; u32 slave_offset, sliver_offset, slave_size; + const struct soc_device_attribute *soc; struct cpsw_common *cpsw; int ret = 0, i; int irq; @@ -3141,6 +3151,10 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_dt_ret; } + soc = soc_device_match(cpsw_soc_devices); + if (soc) + cpsw->quirk_irq = 1; + cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); if (IS_ERR(cpsw->txv[0].ch)) { dev_err(priv->dev, "error initializing tx dma channel\n"); @@ -3180,19 +3194,16 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_dma_ret; } - of_id = of_match_device(cpsw_of_mtable, &pdev->dev); - if (of_id) { - pdev->id_entry = of_id->data; - if (pdev->id_entry->driver_data) - cpsw->quirk_irq = true; - } - ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; - netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); - netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); + netif_napi_add(ndev, &cpsw->napi_rx, + cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll, + CPSW_POLL_WEIGHT); + netif_tx_napi_add(ndev, &cpsw->napi_tx, + cpsw->quirk_irq ? 
cpsw_tx_poll : cpsw_tx_mq_poll, + CPSW_POLL_WEIGHT); cpsw_split_res(ndev); /* register the network device */ -- cgit v1.2.3 From 7e878b605f05bf0c09aa0d81e05fb7878bf8c7d4 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Wed, 16 May 2018 19:09:23 -0700 Subject: bonding: introduce link change helper Introduce an new common helper to avoid redundancy. Signed-off-by: Tonghao Zhang Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a4cd7f6bfd4d..06efdf6a762b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2137,6 +2137,24 @@ static int bond_miimon_inspect(struct bonding *bond) return commit; } +static void bond_miimon_link_change(struct bonding *bond, + struct slave *slave, + char link) +{ + switch (BOND_MODE(bond)) { + case BOND_MODE_8023AD: + bond_3ad_handle_link_change(slave, link); + break; + case BOND_MODE_TLB: + case BOND_MODE_ALB: + bond_alb_handle_link_change(bond, slave, link); + break; + case BOND_MODE_XOR: + bond_update_slave_arr(bond, NULL); + break; + } +} + static void bond_miimon_commit(struct bonding *bond) { struct list_head *iter; @@ -2178,16 +2196,7 @@ static void bond_miimon_commit(struct bonding *bond) slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, slave->duplex ? "full" : "half"); - /* notify ad that the link status has changed */ - if (BOND_MODE(bond) == BOND_MODE_8023AD) - bond_3ad_handle_link_change(slave, BOND_LINK_UP); - - if (bond_is_lb(bond)) - bond_alb_handle_link_change(bond, slave, - BOND_LINK_UP); - - if (BOND_MODE(bond) == BOND_MODE_XOR) - bond_update_slave_arr(bond, NULL); + bond_miimon_link_change(bond, slave, BOND_LINK_UP); if (!bond->curr_active_slave || slave == primary) goto do_failover; @@ -2209,16 +2218,7 @@ static void bond_miimon_commit(struct bonding *bond) netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n", slave->dev->name); - if (BOND_MODE(bond) == BOND_MODE_8023AD) - bond_3ad_handle_link_change(slave, - BOND_LINK_DOWN); - - if (bond_is_lb(bond)) - bond_alb_handle_link_change(bond, slave, - BOND_LINK_DOWN); - - if (BOND_MODE(bond) == BOND_MODE_XOR) - bond_update_slave_arr(bond, NULL); + bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); if (slave == rcu_access_pointer(bond->curr_active_slave)) goto do_failover; -- cgit v1.2.3 From 93c65d13d8a0b7c272868d4a9779f96fc973df26 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 17 May 2018 11:46:41 +0800 Subject: vmxnet3: Replace msleep(1) with usleep_range() As documented in Documentation/timers/timers-howto.txt, replace msleep(1) with usleep_range(). Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/vmxnet3/vmxnet3_drv.c | 6 +++--- drivers/net/vmxnet3/vmxnet3_ethtool.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 9ebe2a689966..2234a334e96e 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2945,7 +2945,7 @@ vmxnet3_close(struct net_device *netdev) * completion. */ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); vmxnet3_quiesce_dev(adapter); @@ -2995,7 +2995,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) * completion. 
*/ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (netif_running(netdev)) { vmxnet3_quiesce_dev(adapter); @@ -3567,7 +3567,7 @@ static void vmxnet3_shutdown_device(struct pci_dev *pdev) * completion. */ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) { diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 2ff27314e047..559db051a500 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -600,7 +600,7 @@ vmxnet3_set_ringparam(struct net_device *netdev, * completion. */ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (netif_running(netdev)) { vmxnet3_quiesce_dev(adapter); -- cgit v1.2.3 From dcd3e73ae7bc82e303969e415d8c3d3147c1af6a Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 17 May 2018 10:29:30 +0200 Subject: net: mvpp2: align the ethtool ops definition Cosmetic patch to align the ethtool functions to ops definitions. This patch does not change in any way the driver's behaviour. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 6f410235987c..77dd91e3d962 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -7859,18 +7859,18 @@ static const struct net_device_ops mvpp2_netdev_ops = { }; static const struct ethtool_ops mvpp2_eth_tool_ops = { - .nway_reset = phy_ethtool_nway_reset, - .get_link = ethtool_op_get_link, - .set_coalesce = mvpp2_ethtool_set_coalesce, - .get_coalesce = mvpp2_ethtool_get_coalesce, - .get_drvinfo = mvpp2_ethtool_get_drvinfo, - .get_ringparam = mvpp2_ethtool_get_ringparam, - .set_ringparam = mvpp2_ethtool_set_ringparam, - .get_strings = mvpp2_ethtool_get_strings, - .get_ethtool_stats = mvpp2_ethtool_get_stats, - .get_sset_count = mvpp2_ethtool_get_sset_count, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .set_coalesce = mvpp2_ethtool_set_coalesce, + .get_coalesce = mvpp2_ethtool_get_coalesce, + .get_drvinfo = mvpp2_ethtool_get_drvinfo, + .get_ringparam = mvpp2_ethtool_get_ringparam, + .set_ringparam = mvpp2_ethtool_set_ringparam, + .get_strings = mvpp2_ethtool_get_strings, + .get_ethtool_stats = mvpp2_ethtool_get_stats, + .get_sset_count = mvpp2_ethtool_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that -- cgit v1.2.3 From 4bb043262878c9fc9b51ad67fc229bb973d957c8 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 17 May 2018 10:29:31 +0200 Subject: net: mvpp2: phylink support Convert the PPv2 driver to implement phylink helpers, and use phylink in DT mode. The other mode supported is ACPI, which will need further work in order to be entirely compatible with phylink. The MAC and GoP configuration functions were completely moved to fit into the phylink helpers. 
When a PHY is always present between the MAC and the physical port, only phylink is used, but when this is not the case (the MAC is connected directly to the physical port) the link IRQ is used to detect changes in the link state and call phylink_mac_change. The ACPI mode does not use phylink as of now, and the changes shouldn't impact its use. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/Kconfig | 1 + drivers/net/ethernet/marvell/mvpp2.c | 846 +++++++++++++++++++++-------------- 2 files changed, 509 insertions(+), 338 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index ebe5c9148935..cc2f7701e71e 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -86,6 +86,7 @@ config MVPP2 depends on ARCH_MVEBU || COMPILE_TEST depends on HAS_DMA select MVMDIO + select PHYLINK ---help--- This driver supports the network interface units in the Marvell ARMADA 375, 7K and 8K SoCs. diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 77dd91e3d962..60093f1e6297 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -359,15 +360,23 @@ #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) #define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) #define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) +#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4) #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVPP2_GMAC_AN_SPEED_EN BIT(7) #define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10) #define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) #define MVPP2_GMAC_STATUS0 0x10 #define MVPP2_GMAC_STATUS0_LINK_UP BIT(0) +#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1) +#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2) +#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3) +#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6) +#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7) +#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11) #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 @@ -379,6 +388,8 @@ #define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1) #define MVPP22_GMAC_CTRL_4_REG 0x90 #define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) +#define MVPP22_CTRL4_RX_FC_EN BIT(3) +#define MVPP22_CTRL4_TX_FC_EN BIT(4) #define MVPP22_CTRL4_DP_CLK_SEL BIT(5) #define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) #define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) @@ -392,6 +403,7 @@ #define MVPP22_XLG_CTRL0_PORT_EN BIT(0) #define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) #define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) +#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8) #define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) #define MVPP22_XLG_CTRL1_REG 0x104 #define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0 @@ -413,6 +425,7 @@ #define MVPP22_XLG_CTRL4_FWD_FC BIT(5) #define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) #define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) +#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14) /* SMI registers. PPv2.2 only, relative to priv->iface_base.
*/ #define MVPP22_SMI_MISC_CFG_REG 0x1204 @@ -1017,6 +1030,9 @@ struct mvpp2_port { /* Firmware node associated to the port */ struct fwnode_handle *fwnode; + /* Is a PHY always connected to the port */ + bool has_phy; + /* Per-port registers' base address */ void __iomem *base; void __iomem *stats_base; @@ -1044,12 +1060,11 @@ struct mvpp2_port { struct mutex gather_stats_lock; struct delayed_work stats_work; + struct device_node *of_node; + phy_interface_t phy_interface; - struct device_node *phy_node; + struct phylink *phylink; struct phy *comphy; - unsigned int link; - unsigned int duplex; - unsigned int speed; struct mvpp2_bm_pool *pool_long; struct mvpp2_bm_pool *pool_short; @@ -1338,6 +1353,12 @@ struct mvpp2_bm_pool { (addr) < (txq_pcpu)->tso_headers_dma + \ (txq_pcpu)->size * TSO_HEADER_SIZE) +/* The prototype is added here to be used in start_dev when using ACPI. This + * will be removed once phylink is used for all modes (dt+ACPI). + */ +static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, + const struct phylink_link_state *state); + /* Queue modes */ #define MVPP2_QDIST_SINGLE_MODE 0 #define MVPP2_QDIST_MULTI_MODE 1 @@ -4969,133 +4990,6 @@ static int mvpp22_comphy_init(struct mvpp2_port *port) return phy_power_on(port->comphy); } -static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) -{ - u32 val; - - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); - val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; - val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; - writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { - val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); - val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | - MVPP22_CTRL4_SYNC_BYPASS_DIS | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; - val &= ~MVPP22_CTRL4_DP_CLK_SEL; - writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - } - - /* The port is connected to a copper PHY */ - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val &= ~MVPP2_GMAC_PORT_TYPE_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); - - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | - MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | - MVPP2_GMAC_AN_DUPLEX_EN; - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) - val |= MVPP2_GMAC_IN_BAND_AUTONEG; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - -static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port) -{ - u32 val; - - /* Force link down */ - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~MVPP2_GMAC_FORCE_LINK_PASS; - val |= MVPP2_GMAC_FORCE_LINK_DOWN; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - - /* Set the GMAC in a reset state */ - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - val |= MVPP2_GMAC_PORT_RESET_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - - /* Configure the PCS and in-band AN */ - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; - } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { - val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; - } - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - - mvpp2_port_mii_gmac_configure_mode(port); - - /* Unset the GMAC reset state */ - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - val &= ~MVPP2_GMAC_PORT_RESET_MASK; - writel(val, 
port->base + MVPP2_GMAC_CTRL_2_REG); - - /* Stop forcing link down */ - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - -static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port) -{ - u32 val; - - if (port->gop_id != 0) - return; - - val = readl(port->base + MVPP22_XLG_CTRL0_REG); - val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; - writel(val, port->base + MVPP22_XLG_CTRL0_REG); - - val = readl(port->base + MVPP22_XLG_CTRL4_REG); - val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; - val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; - writel(val, port->base + MVPP22_XLG_CTRL4_REG); -} - -static void mvpp22_port_mii_set(struct mvpp2_port *port) -{ - u32 val; - - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0) { - val = readl(port->base + MVPP22_XLG_CTRL3_REG); - val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; - - if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR) - val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; - else - val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; - - writel(val, port->base + MVPP22_XLG_CTRL3_REG); - } -} - -static void mvpp2_port_mii_set(struct mvpp2_port *port) -{ - if (port->priv->hw_version == MVPP22) - mvpp22_port_mii_set(port); - - if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) - mvpp2_port_mii_gmac_configure(port); - else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) - mvpp2_port_mii_xlg_configure(port); -} - -static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val |= MVPP2_GMAC_FC_ADV_EN; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - static void mvpp2_port_enable(struct mvpp2_port *port) { u32 val; @@ -5126,8 +5020,11 @@ static void mvpp2_port_disable(struct mvpp2_port *port) (port->phy_interface == PHY_INTERFACE_MODE_XAUI || port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); - val &= ~(MVPP22_XLG_CTRL0_PORT_EN | - MVPP22_XLG_CTRL0_MAC_RESET_DIS); + val &= ~MVPP22_XLG_CTRL0_PORT_EN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + + /* Disable & reset should be done separately */ + val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; writel(val, port->base + MVPP22_XLG_CTRL0_REG); } else { val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); @@ -5147,13 +5044,14 @@ static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) } /* Configure loopback port */ -static void mvpp2_port_loopback_set(struct mvpp2_port *port) +static void mvpp2_port_loopback_set(struct mvpp2_port *port, + const struct phylink_link_state *state) { u32 val; val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); - if (port->speed == 1000) + if (state->speed == 1000) val |= MVPP2_GMAC_GMII_LB_EN_MASK; else val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; @@ -5331,10 +5229,6 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) int tx_port_num, val, queue, ptxq, lrxq; if (port->priv->hw_version == MVPP21) { - /* Configure port to loopback if needed */ - if (port->flags & MVPP2_F_LOOPBACK) - mvpp2_port_loopback_set(port); - /* Update TX FIFO MIN Threshold */ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; @@ -6382,6 +6276,11 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) } } + if (port->phylink) { + phylink_mac_change(port->phylink, link); + goto handled; + } + if 
(!netif_running(dev) || !event) goto handled; @@ -6406,111 +6305,6 @@ handled: return IRQ_HANDLED; } -static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port, - struct phy_device *phydev) -{ - u32 val; - - if (port->phy_interface != PHY_INTERFACE_MODE_RGMII && - port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID && - port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID && - port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID && - port->phy_interface != PHY_INTERFACE_MODE_SGMII) - return; - - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | - MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX | - MVPP2_GMAC_AN_SPEED_EN | - MVPP2_GMAC_AN_DUPLEX_EN); - - if (phydev->duplex) - val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; - - if (phydev->speed == SPEED_1000) - val |= MVPP2_GMAC_CONFIG_GMII_SPEED; - else if (phydev->speed == SPEED_100) - val |= MVPP2_GMAC_CONFIG_MII_SPEED; - - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - -/* Adjust link */ -static void mvpp2_link_event(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct phy_device *phydev = dev->phydev; - bool link_reconfigured = false; - u32 val; - - if (phydev->link) { - if (port->phy_interface != phydev->interface && port->comphy) { - /* disable current port for reconfiguration */ - mvpp2_interrupts_disable(port); - netif_carrier_off(port->dev); - mvpp2_port_disable(port); - phy_power_off(port->comphy); - - /* comphy reconfiguration */ - port->phy_interface = phydev->interface; - mvpp22_comphy_init(port); - - /* gop/mac reconfiguration */ - mvpp22_gop_init(port); - mvpp2_port_mii_set(port); - - link_reconfigured = true; - } - - if ((port->speed != phydev->speed) || - (port->duplex != phydev->duplex)) { - mvpp2_gmac_set_autoneg(port, phydev); - - port->duplex = phydev->duplex; - port->speed = phydev->speed; - } - } - - if (phydev->link != port->link || link_reconfigured) { - port->link = phydev->link; - - if (phydev->link) { - if (port->phy_interface == PHY_INTERFACE_MODE_RGMII || - port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || - port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || - port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val |= (MVPP2_GMAC_FORCE_LINK_PASS | - MVPP2_GMAC_FORCE_LINK_DOWN); - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - } - - mvpp2_interrupts_enable(port); - mvpp2_port_enable(port); - - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - netif_carrier_on(dev); - netif_tx_wake_all_queues(dev); - } else { - port->duplex = -1; - port->speed = 0; - - netif_tx_stop_all_queues(dev); - netif_carrier_off(dev); - mvpp2_ingress_disable(port); - mvpp2_egress_disable(port); - - mvpp2_port_disable(port); - mvpp2_interrupts_disable(port); - } - - phy_print_status(phydev); - } -} - static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) { ktime_t interval; @@ -7118,11 +6912,29 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) return rx_done; } -/* Set hw internals when starting port */ -static void mvpp2_start_dev(struct mvpp2_port *port) +static void mvpp22_mode_reconfigure(struct mvpp2_port *port) { - struct net_device *ndev = port->dev; - int i; + u32 ctrl3; + + /* comphy reconfiguration */ + mvpp22_comphy_init(port); + + /* gop reconfiguration */ + mvpp22_gop_init(port); + + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0) { + ctrl3 = readl(port->base + 
MVPP22_XLG_CTRL3_REG); + ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + + if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR) + ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; + else + ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; + + writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); + } if (port->gop_id == 0 && (port->phy_interface == PHY_INTERFACE_MODE_XAUI || @@ -7130,6 +6942,12 @@ static void mvpp2_start_dev(struct mvpp2_port *port) mvpp2_xlg_max_rx_size_set(port); else mvpp2_gmac_max_rx_size_set(port); +} + +/* Set hw internals when starting port */ +static void mvpp2_start_dev(struct mvpp2_port *port) +{ + int i; mvpp2_txp_max_tx_size_set(port); @@ -7139,42 +6957,39 @@ static void mvpp2_start_dev(struct mvpp2_port *port) /* Enable interrupts on all CPUs */ mvpp2_interrupts_enable(port); - if (port->priv->hw_version == MVPP22) { - mvpp22_comphy_init(port); - mvpp22_gop_init(port); + if (port->priv->hw_version == MVPP22) + mvpp22_mode_reconfigure(port); + + if (port->phylink) { + phylink_start(port->phylink); + } else { + /* Phylink isn't used as of now for ACPI, so the MAC has to be + * configured manually when the interface is started. This will + * be removed as soon as the phylink ACPI support lands in. + */ + struct phylink_link_state state = { + .interface = port->phy_interface, + .link = 1, + }; + mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); } - mvpp2_port_mii_set(port); - mvpp2_port_enable(port); - if (ndev->phydev) - phy_start(ndev->phydev); netif_tx_start_all_queues(port->dev); } /* Set hw internals when stopping port */ static void mvpp2_stop_dev(struct mvpp2_port *port) { - struct net_device *ndev = port->dev; int i; - /* Stop new packets from arriving to RXQs */ - mvpp2_ingress_disable(port); - - mdelay(10); - /* Disable interrupts on all CPUs */ mvpp2_interrupts_disable(port); for (i = 0; i < port->nqvecs; i++) napi_disable(&port->qvecs[i].napi); - netif_carrier_off(port->dev); - netif_tx_stop_all_queues(port->dev); - - mvpp2_egress_disable(port); - mvpp2_port_disable(port); - if (ndev->phydev) - phy_stop(ndev->phydev); + if (port->phylink) + phylink_stop(port->phylink); phy_power_off(port->comphy); } @@ -7233,40 +7048,6 @@ static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; } -static int mvpp2_phy_connect(struct mvpp2_port *port) -{ - struct phy_device *phy_dev; - - /* No PHY is attached */ - if (!port->phy_node) - return 0; - - phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0, - port->phy_interface); - if (!phy_dev) { - netdev_err(port->dev, "cannot connect to phy\n"); - return -ENODEV; - } - phy_dev->supported &= PHY_GBIT_FEATURES; - phy_dev->advertising = phy_dev->supported; - - port->link = 0; - port->duplex = 0; - port->speed = 0; - - return 0; -} - -static void mvpp2_phy_disconnect(struct mvpp2_port *port) -{ - struct net_device *ndev = port->dev; - - if (!ndev->phydev) - return; - - phy_disconnect(ndev->phydev); -} - static int mvpp2_irqs_init(struct mvpp2_port *port) { int err, i; @@ -7350,6 +7131,7 @@ static int mvpp2_open(struct net_device *dev) struct mvpp2 *priv = port->priv; unsigned char mac_bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + bool valid = false; int err; err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); @@ -7392,7 +7174,19 @@ static int mvpp2_open(struct net_device *dev) goto err_cleanup_txqs; } - if (priv->hw_version == MVPP22 && !port->phy_node && 
port->link_irq) { + /* Phylink isn't supported yet in ACPI mode */ + if (port->of_node) { + err = phylink_of_phy_connect(port->phylink, port->of_node, 0); + if (err) { + netdev_err(port->dev, "could not attach PHY (%d)\n", + err); + goto err_free_irq; + } + + valid = true; + } + + if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) { err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, dev->name, port); if (err) { @@ -7402,14 +7196,20 @@ static int mvpp2_open(struct net_device *dev) } mvpp22_gop_setup_irq(port); - } - /* In default link is down */ - netif_carrier_off(port->dev); + /* In default link is down */ + netif_carrier_off(port->dev); - err = mvpp2_phy_connect(port); - if (err < 0) - goto err_free_link_irq; + valid = true; + } else { + port->link_irq = 0; + } + + if (!valid) { + netdev_err(port->dev, + "invalid configuration: no dt or link IRQ"); + goto err_free_irq; + } /* Unmask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_unmask, port, 1); @@ -7426,9 +7226,6 @@ static int mvpp2_open(struct net_device *dev) return 0; -err_free_link_irq: - if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) - free_irq(port->link_irq, port); err_free_irq: mvpp2_irqs_deinit(port); err_cleanup_txqs: @@ -7442,17 +7239,17 @@ static int mvpp2_stop(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_port_pcpu *port_pcpu; - struct mvpp2 *priv = port->priv; int cpu; mvpp2_stop_dev(port); - mvpp2_phy_disconnect(port); /* Mask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_mask, port, 1); mvpp2_shared_interrupt_mask_unmask(port, true); - if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) + if (port->phylink) + phylink_disconnect_phy(port->phylink); + if (port->link_irq) free_irq(port->link_irq, port); mvpp2_irqs_deinit(port); @@ -7658,16 +7455,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - int ret; + struct mvpp2_port *port = netdev_priv(dev); - if (!dev->phydev) + if (!port->phylink) return -ENOTSUPP; - ret = phy_mii_ioctl(dev->phydev, ifr, cmd); - if (!ret) - mvpp2_link_event(dev); - - return ret; + return phylink_mii_ioctl(port->phylink, ifr, cmd); } static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) @@ -7714,6 +7507,16 @@ static int mvpp2_set_features(struct net_device *dev, /* Ethtool methods */ +static int mvpp2_ethtool_nway_reset(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_nway_reset(port->phylink); +} + /* Set interrupt coalescing for ethtools */ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) @@ -7842,6 +7645,50 @@ err_out: return err; } +static void mvpp2_ethtool_get_pause_param(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return; + + phylink_ethtool_get_pauseparam(port->phylink, pause); +} + +static int mvpp2_ethtool_set_pause_param(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_set_pauseparam(port->phylink, pause); +} + +static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if 
(!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_ksettings_get(port->phylink, cmd); +} + +static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_ksettings_set(port->phylink, cmd); +} + /* Device ops */ static const struct net_device_ops mvpp2_netdev_ops = { @@ -7859,7 +7706,7 @@ static const struct net_device_ops mvpp2_netdev_ops = { }; static const struct ethtool_ops mvpp2_eth_tool_ops = { - .nway_reset = phy_ethtool_nway_reset, + .nway_reset = mvpp2_ethtool_nway_reset, .get_link = ethtool_op_get_link, .set_coalesce = mvpp2_ethtool_set_coalesce, .get_coalesce = mvpp2_ethtool_get_coalesce, @@ -7869,8 +7716,10 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_strings = mvpp2_ethtool_get_strings, .get_ethtool_stats = mvpp2_ethtool_get_stats, .get_sset_count = mvpp2_ethtool_get_sset_count, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_pauseparam = mvpp2_ethtool_get_pause_param, + .set_pauseparam = mvpp2_ethtool_set_pause_param, + .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, + .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, }; /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that @@ -8172,18 +8021,330 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, eth_hw_addr_random(dev); } +static void mvpp2_phylink_validate(struct net_device *dev, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 10000baseT_Full); + + if (state->interface == PHY_INTERFACE_MODE_10GKR) { + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 10000baseKR_Full); + } + + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void mvpp22_xlg_link_state(struct mvpp2_port *port, + struct phylink_link_state *state) +{ + u32 val; + + state->speed = SPEED_10000; + state->duplex = 1; + state->an_complete = 1; + + val = readl(port->base + MVPP22_XLG_STATUS); + state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); + + state->pause = 0; + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_TX; + if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_RX; +} + +static void mvpp2_gmac_link_state(struct mvpp2_port *port, + struct phylink_link_state *state) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_STATUS0); + + state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); + state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); + state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); + + if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) + state->speed = SPEED_1000; + else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) + state->speed = SPEED_100; + else + 
state->speed = SPEED_10; + + state->pause = 0; + if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) + state->pause |= MLO_PAUSE_RX; + if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) + state->pause |= MLO_PAUSE_TX; +} + +static int mvpp2_phylink_mac_link_state(struct net_device *dev, + struct phylink_link_state *state) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { + u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); + mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + + if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) { + mvpp22_xlg_link_state(port, state); + return 1; + } + } + + mvpp2_gmac_link_state(port, state); + return 1; +} + +static void mvpp2_mac_an_restart(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + u32 val; + + if (port->phy_interface != PHY_INTERFACE_MODE_SGMII) + return; + + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + /* The RESTART_AN bit is cleared by the h/w after restarting the AN + * process. + */ + val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 ctrl0, ctrl4; + + ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); + ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); + + if (state->pause & MLO_PAUSE_TX) + ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + + ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; + ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC | + MVPP22_XLG_CTRL4_EN_IDLE_CHECK; + + writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); + writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); +} + +static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 an, ctrl0, ctrl2, ctrl4; + + an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + + /* Force link down */ + an &= ~MVPP2_GMAC_FORCE_LINK_PASS; + an |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + /* Set the GMAC in a reset state */ + ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + + an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | + MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | + MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN | + MVPP2_GMAC_FORCE_LINK_DOWN); + ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; + ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); + + if (!phy_interface_mode_is_rgmii(state->interface)) + an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG; + + if (state->duplex) + an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; + if (phylink_test(state->advertising, Pause)) + an |= MVPP2_GMAC_FC_ADV_EN; + if (phylink_test(state->advertising, Asym_Pause)) + an |= MVPP2_GMAC_FC_ADV_ASM_EN; + + if (state->interface == PHY_INTERFACE_MODE_SGMII) { + an |= MVPP2_GMAC_IN_BAND_AUTONEG; + ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; + + ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + 
+ if (state->pause & MLO_PAUSE_TX) + ctrl4 |= MVPP22_CTRL4_TX_FC_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl4 |= MVPP22_CTRL4_RX_FC_EN; + } else if (phy_interface_mode_is_rgmii(state->interface)) { + an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS; + + if (state->speed == SPEED_1000) + an |= MVPP2_GMAC_CONFIG_GMII_SPEED; + else if (state->speed == SPEED_100) + an |= MVPP2_GMAC_CONFIG_MII_SPEED; + + ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; + ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } + + writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Check for invalid configuration */ + if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) { + netdev_err(dev, "Invalid mode on %s\n", dev->name); + return; + } + + netif_tx_stop_all_queues(port->dev); + if (!port->has_phy) + netif_carrier_off(port->dev); + + /* Make sure the port is disabled when reconfiguring the mode */ + mvpp2_port_disable(port); + + if (port->priv->hw_version == MVPP22 && + port->phy_interface != state->interface) { + port->phy_interface = state->interface; + + /* Reconfigure the serdes lanes */ + phy_power_off(port->comphy); + mvpp22_mode_reconfigure(port); + } + + /* mac (re)configuration */ + if (state->interface == PHY_INTERFACE_MODE_10GKR) + mvpp2_xlg_config(port, mode, state); + else if (phy_interface_mode_is_rgmii(state->interface) || + state->interface == PHY_INTERFACE_MODE_SGMII) + mvpp2_gmac_config(port, mode, state); + + if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) + mvpp2_port_loopback_set(port, state); + + /* If the port already was up, make sure it's still in the same state */ + if (state->link || !port->has_phy) { + mvpp2_port_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + if (!port->has_phy) + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } +} + +static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy) +{ + struct mvpp2_port *port = netdev_priv(dev); + u32 val; + + if (!phylink_autoneg_inband(mode) && + interface != PHY_INTERFACE_MODE_10GKR) { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; + if (phy_interface_mode_is_rgmii(interface)) + val |= MVPP2_GMAC_FORCE_LINK_PASS; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + + mvpp2_port_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + netif_tx_wake_all_queues(dev); +} + +static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode, + phy_interface_t interface) +{ + struct mvpp2_port *port = netdev_priv(dev); + u32 val; + + if (!phylink_autoneg_inband(mode) && + interface != PHY_INTERFACE_MODE_10GKR) { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + + netif_tx_stop_all_queues(dev); + mvpp2_egress_disable(port); + mvpp2_ingress_disable(port); + + /* When using link interrupts to notify phylink of a MAC state change, + * we do not want the port to be disabled (we want to receive further + * 
interrupts, to be notified when the port will have a link later). + */ + if (!port->has_phy) + return; + + mvpp2_port_disable(port); +} + +static const struct phylink_mac_ops mvpp2_phylink_ops = { + .validate = mvpp2_phylink_validate, + .mac_link_state = mvpp2_phylink_mac_link_state, + .mac_an_restart = mvpp2_mac_an_restart, + .mac_config = mvpp2_mac_config, + .mac_link_up = mvpp2_mac_link_up, + .mac_link_down = mvpp2_mac_link_down, +}; + /* Ports initialization */ static int mvpp2_port_probe(struct platform_device *pdev, struct fwnode_handle *port_fwnode, struct mvpp2 *priv) { - struct device_node *phy_node; struct phy *comphy = NULL; struct mvpp2_port *port; struct mvpp2_port_pcpu *port_pcpu; struct device_node *port_node = to_of_node(port_fwnode); struct net_device *dev; struct resource *res; + struct phylink *phylink; char *mac_from = ""; unsigned int ntxqs, nrxqs; bool has_tx_irqs; @@ -8212,11 +8373,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, if (!dev) return -ENOMEM; - if (port_node) - phy_node = of_parse_phandle(port_node, "phy", 0); - else - phy_node = NULL; - phy_mode = fwnode_get_phy_mode(port_fwnode); if (phy_mode < 0) { dev_err(&pdev->dev, "incorrect phy mode\n"); @@ -8249,6 +8405,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, port = netdev_priv(dev); port->dev = dev; port->fwnode = port_fwnode; + port->has_phy = !!of_find_property(port_node, "phy", NULL); port->ntxqs = ntxqs; port->nrxqs = nrxqs; port->priv = priv; @@ -8279,7 +8436,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, else port->first_rxq = port->id * priv->max_port_rxqs; - port->phy_node = phy_node; + port->of_node = port_node; port->phy_interface = phy_mode; port->comphy = comphy; @@ -8340,9 +8497,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, mvpp2_port_periodic_xon_disable(port); - if (priv->hw_version == MVPP21) - mvpp2_port_fc_adv_enable(port); - mvpp2_port_reset(port); port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); @@ -8386,10 +8540,23 @@ static int mvpp2_port_probe(struct platform_device *pdev, /* 9704 == 9728 - 20 and rounding to 8 */ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; + /* Phylink isn't used w/ ACPI as of now */ + if (port_node) { + phylink = phylink_create(dev, port_fwnode, phy_mode, + &mvpp2_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto err_free_port_pcpu; + } + port->phylink = phylink; + } else { + port->phylink = NULL; + } + err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register netdev\n"); - goto err_free_port_pcpu; + goto err_phylink; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); @@ -8397,6 +8564,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, return 0; +err_phylink: + if (port->phylink) + phylink_destroy(port->phylink); err_free_port_pcpu: free_percpu(port->pcpu); err_free_txq_pcpu: @@ -8410,7 +8580,6 @@ err_free_irq: err_deinit_qvecs: mvpp2_queue_vectors_deinit(port); err_free_netdev: - of_node_put(phy_node); free_netdev(dev); return err; } @@ -8421,7 +8590,8 @@ static void mvpp2_port_remove(struct mvpp2_port *port) int i; unregister_netdev(port->dev); - of_node_put(port->phy_node); + if (port->phylink) + phylink_destroy(port->phylink); free_percpu(port->pcpu); free_percpu(port->stats); for (i = 0; i < port->ntxqs; i++) -- cgit v1.2.3 From d97c9f4ab000bd7bc0bbca78e65add9081096002 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 17 May 2018 10:29:34 +0200 Subject: net: mvpp2: 1000baseX support This patch adds the 
1000Base-X PHY mode support in the Marvell PPv2 driver. 1000Base-X is quite close to SGMII and uses nearly the same code path. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 72 +++++++++++++++++++++++++----------- 1 file changed, 51 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 60093f1e6297..ece61f1727e4 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -4870,6 +4870,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port) mvpp22_gop_init_rgmii(port); break; case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: mvpp22_gop_init_sgmii(port); break; case PHY_INTERFACE_MODE_10GKR: @@ -4907,7 +4908,8 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) u32 val; if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { /* Enable the GMAC link status irq for this port */ val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; @@ -4937,7 +4939,8 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port) } if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); @@ -4949,7 +4952,8 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port) u32 val; if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { val = readl(port->base + MVPP22_GMAC_INT_MASK); val |= MVPP22_GMAC_INT_MASK_LINK_STAT; writel(val, port->base + MVPP22_GMAC_INT_MASK); @@ -4974,6 +4978,7 @@ static int mvpp22_comphy_init(struct mvpp2_port *port) switch (port->phy_interface) { case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: mode = PHY_MODE_SGMII; break; case PHY_INTERFACE_MODE_10GKR: @@ -5056,7 +5061,8 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port, else val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) val |= MVPP2_GMAC_PCS_LB_EN_MASK; else val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; @@ -6266,7 +6272,8 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) link = true; } } else if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { val = readl(port->base + MVPP22_GMAC_INT_STAT); if (val & MVPP22_GMAC_INT_STAT_LINK) { event = true; @@ -8032,20 +8039,25 @@ static void mvpp2_phylink_validate(struct net_device *dev, phylink_set(mask, Pause); phylink_set(mask, Asym_Pause); - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 10000baseT_Full); - - if
(state->interface == PHY_INTERFACE_MODE_10GKR) { + switch (state->interface) { + case PHY_INTERFACE_MODE_10GKR: phylink_set(mask, 10000baseCR_Full); phylink_set(mask, 10000baseSR_Full); phylink_set(mask, 10000baseLR_Full); phylink_set(mask, 10000baseLRM_Full); phylink_set(mask, 10000baseER_Full); phylink_set(mask, 10000baseKR_Full); + /* Fall-through */ + default: + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 10000baseT_Full); + /* Fall-through */ + case PHY_INTERFACE_MODE_1000BASEX: + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); } bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -8084,12 +8096,18 @@ static void mvpp2_gmac_link_state(struct mvpp2_port *port, state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); - if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_1000BASEX: state->speed = SPEED_1000; - else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) - state->speed = SPEED_100; - else - state->speed = SPEED_10; + break; + default: + if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) + state->speed = SPEED_1000; + else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) + state->speed = SPEED_100; + else + state->speed = SPEED_10; + } state->pause = 0; if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) @@ -8181,8 +8199,18 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); - if (!phy_interface_mode_is_rgmii(state->interface)) + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { + /* 1000BaseX port cannot negotiate speed nor can it negotiate + * duplex: they are always operating with a fixed speed of + * 1000Mbps in full duplex, so force 1000 speed and full duplex + * here. + */ + ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; + an |= MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; + } else if (!phy_interface_mode_is_rgmii(state->interface)) { an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG; + } if (state->duplex) an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; @@ -8191,7 +8219,8 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, if (phylink_test(state->advertising, Asym_Pause)) an |= MVPP2_GMAC_FC_ADV_ASM_EN; - if (state->interface == PHY_INTERFACE_MODE_SGMII) { + if (state->interface == PHY_INTERFACE_MODE_SGMII || + state->interface == PHY_INTERFACE_MODE_1000BASEX) { an |= MVPP2_GMAC_IN_BAND_AUTONEG; ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; @@ -8256,7 +8285,8 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, if (state->interface == PHY_INTERFACE_MODE_10GKR) mvpp2_xlg_config(port, mode, state); else if (phy_interface_mode_is_rgmii(state->interface) || - state->interface == PHY_INTERFACE_MODE_SGMII) + state->interface == PHY_INTERFACE_MODE_SGMII || + state->interface == PHY_INTERFACE_MODE_1000BASEX) mvpp2_gmac_config(port, mode, state); if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) -- cgit v1.2.3 From a6fe31de866f35d2d926a0ee13125fead935846b Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 17 May 2018 10:29:35 +0200 Subject: net: mvpp2: 2500baseX support This patch adds the 2500Base-X PHY mode support in the Marvell PPv2 driver. 
2500Base-X is quite close to 1000Base-X and SGMII modes and uses nearly the same code path. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 51 +++++++++++++++++++++++++++--------- 1 file changed, 39 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index ece61f1727e4..5e580482769e 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -4871,6 +4871,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port) break; case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: mvpp22_gop_init_sgmii(port); break; case PHY_INTERFACE_MODE_10GKR: @@ -4909,7 +4910,8 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) if (phy_interface_mode_is_rgmii(port->phy_interface) || port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { /* Enable the GMAC link status irq for this port */ val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; @@ -4940,7 +4942,8 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port) if (phy_interface_mode_is_rgmii(port->phy_interface) || port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); @@ -4953,7 +4956,8 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port) if (phy_interface_mode_is_rgmii(port->phy_interface) || port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { val = readl(port->base + MVPP22_GMAC_INT_MASK); val |= MVPP22_GMAC_INT_MASK_LINK_STAT; writel(val, port->base + MVPP22_GMAC_INT_MASK); @@ -4968,6 +4972,16 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port) mvpp22_gop_unmask_irq(port); } +/* Sets the PHY mode of the COMPHY (which configures the serdes lanes). + * + * The PHY mode used by the PPv2 driver comes from the network subsystem, while + * the one given to the COMPHY comes from the generic PHY subsystem. Hence they + * differ. + * + * The COMPHY configures the serdes lanes regardless of the actual use of the + * lanes by the physical layer. This is why configurations like + * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid. 
+ */ static int mvpp22_comphy_init(struct mvpp2_port *port) { enum phy_mode mode; @@ -4981,6 +4995,9 @@ static int mvpp22_comphy_init(struct mvpp2_port *port) case PHY_INTERFACE_MODE_1000BASEX: mode = PHY_MODE_SGMII; break; + case PHY_INTERFACE_MODE_2500BASEX: + mode = PHY_MODE_2500SGMII; + break; case PHY_INTERFACE_MODE_10GKR: mode = PHY_MODE_10GKR; break; @@ -5062,7 +5079,8 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port, val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; if (port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) val |= MVPP2_GMAC_PCS_LB_EN_MASK; else val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; @@ -6273,7 +6291,8 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) } } else if (phy_interface_mode_is_rgmii(port->phy_interface) || port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { val = readl(port->base + MVPP22_GMAC_INT_STAT); if (val & MVPP22_GMAC_INT_STAT_LINK) { event = true; @@ -8056,8 +8075,10 @@ static void mvpp2_phylink_validate(struct net_device *dev, phylink_set(mask, 10000baseT_Full); /* Fall-through */ case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseX_Full); } bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -8100,6 +8121,9 @@ static void mvpp2_gmac_link_state(struct mvpp2_port *port, case PHY_INTERFACE_MODE_1000BASEX: state->speed = SPEED_1000; break; + case PHY_INTERFACE_MODE_2500BASEX: + state->speed = SPEED_2500; + break; default: if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) state->speed = SPEED_1000; @@ -8199,11 +8223,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); - if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { - /* 1000BaseX port cannot negotiate speed nor can it negotiate - * duplex: they are always operating with a fixed speed of - * 1000Mbps in full duplex, so force 1000 speed and full duplex - * here. + if (state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX) { + /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can + * they negotiate duplex: they are always operating with a fixed + * speed of 1000/2500Mbps in full duplex, so force 1000/2500 + * speed and full duplex here. 
*/ ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; an |= MVPP2_GMAC_CONFIG_GMII_SPEED | @@ -8220,7 +8245,8 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, an |= MVPP2_GMAC_FC_ADV_ASM_EN; if (state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX) { + state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX) { an |= MVPP2_GMAC_IN_BAND_AUTONEG; ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; @@ -8286,7 +8312,8 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, mvpp2_xlg_config(port, mode, state); else if (phy_interface_mode_is_rgmii(state->interface) || state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX) + state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX) mvpp2_gmac_config(port, mode, state); if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) -- cgit v1.2.3 From 914365f1c97d3aa1881fd6fc477c4799a39c00f9 Mon Sep 17 00:00:00 2001 From: Yan Markman Date: Thu, 17 May 2018 10:34:25 +0200 Subject: net: mvpp2: avoid checking for free aggregated descriptors twice Avoid repeating the check for free aggregated descriptors when it already failed at the beginning of the function. Signed-off-by: Yan Markman [Antoine: commit message] Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 5e580482769e..de664b3f45f2 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5487,11 +5487,10 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu)); aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; - } - - if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) - return -ENOMEM; + if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) + return -ENOMEM; + } return 0; } -- cgit v1.2.3 From 5b0ab2f41d7593d87762dce1cbaf548f988a2917 Mon Sep 17 00:00:00 2001 From: Yan Markman Date: Thu, 17 May 2018 10:34:26 +0200 Subject: net: mvpp2: set mac address does not require the stop/start sequence Remove special stop/start handling from the set_mac_address callback. All this special care is not needed, and can be removed. It also simplifies the up/down status in the driver and helps avoiding possible link status mismatch issues. Signed-off-by: Yan Markman [Antoine: commit message] Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 38 +++++++----------------------------- 1 file changed, 7 insertions(+), 31 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index de664b3f45f2..8c9496002bd7 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -7357,42 +7357,18 @@ static void mvpp2_set_rx_mode(struct net_device *dev) static int mvpp2_set_mac_address(struct net_device *dev, void *p) { - struct mvpp2_port *port = netdev_priv(dev); const struct sockaddr *addr = p; int err; - if (!is_valid_ether_addr(addr->sa_data)) { - err = -EADDRNOTAVAIL; - goto log_error; - } - - if (!netif_running(dev)) { - err = mvpp2_prs_update_mac_da(dev, addr->sa_data); - if (!err) - return 0; - /* Reconfigure parser to accept the original MAC address */ - err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); - if (err) - goto log_error; - } - - mvpp2_stop_dev(port); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; err = mvpp2_prs_update_mac_da(dev, addr->sa_data); - if (!err) - goto out_start; - - /* Reconfigure parser accept the original MAC address */ - err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); - if (err) - goto log_error; -out_start: - mvpp2_start_dev(port); - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - return 0; -log_error: - netdev_err(dev, "failed to change MAC address\n"); + if (err) { + /* Reconfigure parser accept the original MAC address */ + mvpp2_prs_update_mac_da(dev, dev->dev_addr); + netdev_err(dev, "failed to change MAC address\n"); + } return err; } -- cgit v1.2.3 From 934e0f8330a483a582805c640ef9885b4e69aa63 Mon Sep 17 00:00:00 2001 From: Yan Markman Date: Thu, 17 May 2018 10:34:27 +0200 Subject: net: mvpp2: print rx error with rate-limit Prevent flood of RX error prints during heavy traffic with weak signal in link by checking net_ratelimit() before using netdev_err(). Signed-off-by: Yan Markman [Antoine: small rework, commit message] Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 14 ++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 8c9496002bd7..7ba7e5938c2e 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -6381,21 +6381,23 @@ static void mvpp2_rx_error(struct mvpp2_port *port, { u32 status = mvpp2_rxdesc_status_get(port, rx_desc); size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); + char *err_str = NULL; switch (status & MVPP2_RXD_ERR_CODE_MASK) { case MVPP2_RXD_ERR_CRC: - netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", - status, sz); + err_str = "crc"; break; case MVPP2_RXD_ERR_OVERRUN: - netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", - status, sz); + err_str = "overrun"; break; case MVPP2_RXD_ERR_RESOURCE: - netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", - status, sz); + err_str = "resource"; break; } + if (err_str && net_ratelimit()) + netdev_err(port->dev, + "bad rx status %08x (%s error), size=%zu\n", + status, err_str, sz); } /* Handle RX checksum offload */ -- cgit v1.2.3 From 3b734ff604d32e3e1d7db877d801818967ad325f Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 17 May 2018 12:06:43 +0200 Subject: nfp: flower: fix error path during representor creation Don't store the repr pointer in the reprs array until the representor is successfully created. This avoids a message about "representor destruction" even when it was never created. It also cleans up the flow. Also, check the return value after port alloc. Signed-off-by: Jiri Pirko Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/main.c | 13 +++++++++++-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 9 +++++++-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 1 + 3 files changed, 19 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 84e3b9f5abb1..4e67c0cbf9f0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -247,12 +247,16 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, err = -ENOMEM; goto err_reprs_clean; } - RCU_INIT_POINTER(reprs->reprs[i], repr); /* For now we only support 1 PF */ WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); port = nfp_port_alloc(app, port_type, repr); + if (IS_ERR(port)) { + err = PTR_ERR(port); + nfp_repr_free(repr); + goto err_reprs_clean; + } if (repr_type == NFP_REPR_TYPE_PF) { port->pf_id = i; port->vnic = priv->nn->dp.ctrl_bar; @@ -271,9 +275,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); + nfp_repr_free(repr); goto err_reprs_clean; } + RCU_INIT_POINTER(reprs->reprs[i], repr); nfp_info(app->cpp, "%s%d Representor(%s) created\n", repr_type == NFP_REPR_TYPE_PF ?
"PF" : "VF", i, repr->name); @@ -344,16 +350,17 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) err = -ENOMEM; goto err_reprs_clean; } - RCU_INIT_POINTER(reprs->reprs[phys_port], repr); port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); + nfp_repr_free(repr); goto err_reprs_clean; } err = nfp_port_init_phy_port(app->pf, app, port, i); if (err) { nfp_port_free(port); + nfp_repr_free(repr); goto err_reprs_clean; } @@ -365,6 +372,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) cmsg_port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); + nfp_repr_free(repr); goto err_reprs_clean; } @@ -373,6 +381,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) eth_tbl->ports[i].base, phys_port); + RCU_INIT_POINTER(reprs->reprs[phys_port], repr); nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n", phys_port, repr->name); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 0cd077addb26..6e79da91e475 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -348,12 +348,17 @@ err_clean: return err; } -static void nfp_repr_free(struct nfp_repr *repr) +static void __nfp_repr_free(struct nfp_repr *repr) { free_percpu(repr->stats); free_netdev(repr->netdev); } +void nfp_repr_free(struct net_device *netdev) +{ + __nfp_repr_free(netdev_priv(netdev)); +} + struct net_device *nfp_repr_alloc(struct nfp_app *app) { struct net_device *netdev; @@ -385,7 +390,7 @@ static void nfp_repr_clean_and_free(struct nfp_repr *repr) nfp_info(repr->app->cpp, "Destroying Representor(%s)\n", repr->netdev->name); nfp_repr_clean(repr); - nfp_repr_free(repr); + __nfp_repr_free(repr); } void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index a621e8ff528e..cd756a15445f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -123,6 +123,7 @@ void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len); int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 cmsg_port_id, struct nfp_port *port, struct net_device *pf_netdev); +void nfp_repr_free(struct net_device *netdev); struct net_device *nfp_repr_alloc(struct nfp_app *app); void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs); void nfp_reprs_clean_and_free_by_type(struct nfp_app *app, -- cgit v1.2.3 From 8a8633978b842c88fbcfe00d4e5dde96048f630e Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 17 May 2018 12:05:00 -0700 Subject: qede: Add build_skb() support. This patch makes use of build_skb() throughout the driver's receive data path [HW gro flow and non HW gro flow]. With this, the driver can build the skb directly from the page segments which are already mapped to the hardware, instead of allocating a new SKB via netdev_alloc_skb() and memcpying the data, which is quite costly. This really improves performance (keeping the same or a slight gain in rx throughput) in terms of CPU utilization, which is significantly reduced [almost half] in the non HW gro flow where for every incoming MTU sized packet the driver had to allocate an skb, memcpy headers etc.
Additionally in that flow, it also gets rid of bunch of additional overheads [eth_get_headlen() etc.] to split headers and data in the skb. Tested with: system: 2 sockets, 4 cores per socket, hyperthreading, 2x4x2=16 cores iperf [server]: iperf -s iperf [client]: iperf -c -t 500 -i 10 -P 32 HW GRO off – w/o build_skb(), throughput: 36.8 Gbits/sec Average: CPU %usr %nice %sys %iowait %irq %soft %steal %guest %idle Average: all 0.59 0.00 32.93 0.00 0.00 43.07 0.00 0.00 23.42 HW GRO off - with build_skb(), throughput: 36.9 Gbits/sec Average: CPU %usr %nice %sys %iowait %irq %soft %steal %guest %idle Average: all 0.70 0.00 31.70 0.00 0.00 25.68 0.00 0.00 41.92 HW GRO on - w/o build_skb(), throughput: 36.9 Gbits/sec Average: CPU %usr %nice %sys %iowait %irq %soft %steal %guest %idle Average: all 0.86 0.00 24.14 0.00 0.00 6.59 0.00 0.00 68.41 HW GRO on - with build_skb(), throughput: 37.5 Gbits/sec Average: CPU %usr %nice %sys %iowait %irq %soft %steal %guest %idle Average: all 0.87 0.00 23.75 0.00 0.00 6.19 0.00 0.00 69.19 Signed-off-by: Ariel Elior Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 5 +- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 3 +- drivers/net/ethernet/qlogic/qede/qede_fp.c | 227 +++++++++++++----------- drivers/net/ethernet/qlogic/qede/qede_main.c | 76 ++------ 4 files changed, 137 insertions(+), 174 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 9935978c5542..2d3f09ed413b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -290,15 +290,12 @@ struct qede_agg_info { * aggregation. */ struct sw_rx_data buffer; - dma_addr_t buffer_mapping; - struct sk_buff *skb; /* We need some structs from the start cookie until termination */ u16 vlan_tag; - u16 start_cqe_bd_len; - u8 start_cqe_placement_offset; + bool tpa_start_fail; u8 state; u8 frag_id; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index ecbf1ded7a39..8c6fdad91986 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1508,7 +1508,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) len = le16_to_cpu(fp_cqe->len_on_first_bd); data_ptr = (u8 *)(page_address(sw_rx_data->data) + fp_cqe->placement_offset + - sw_rx_data->page_offset); + sw_rx_data->page_offset + + rxq->rx_headroom); if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) && ether_addr_equal(data_ptr + ETH_ALEN, edev->ndev->dev_addr)) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 14941303189d..6c702399b801 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -660,7 +660,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev, /* Add one frag and update the appropriate fields in the skb */ skb_fill_page_desc(skb, tpa_info->frag_id++, - current_bd->data, current_bd->page_offset, + current_bd->data, + current_bd->page_offset + rxq->rx_headroom, len_on_bd); if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { @@ -671,8 +672,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev, goto out; } - qed_chain_consume(&rxq->rx_bd_ring); - rxq->sw_rx_cons++; + qede_rx_bd_ring_consume(rxq); skb->data_len += len_on_bd; skb->truesize += rxq->rx_buf_seg_size; @@ -721,64 +721,129 @@ static u8 
qede_check_tunn_csum(u16 flag) return QEDE_CSUM_UNNECESSARY | tcsum; } +static inline struct sk_buff * +qede_build_skb(struct qede_rx_queue *rxq, + struct sw_rx_data *bd, u16 len, u16 pad) +{ + struct sk_buff *skb; + void *buf; + + buf = page_address(bd->data) + bd->page_offset; + skb = build_skb(buf, rxq->rx_buf_seg_size); + + skb_reserve(skb, pad); + skb_put(skb, len); + + return skb; +} + +static struct sk_buff * +qede_tpa_rx_build_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *bd, u16 len, u16 pad, + bool alloc_skb) +{ + struct sk_buff *skb; + + skb = qede_build_skb(rxq, bd, len, pad); + bd->page_offset += rxq->rx_buf_seg_size; + + if (bd->page_offset == PAGE_SIZE) { + if (unlikely(qede_alloc_rx_buffer(rxq, true))) { + DP_NOTICE(edev, + "Failed to allocate RX buffer for tpa start\n"); + bd->page_offset -= rxq->rx_buf_seg_size; + page_ref_inc(bd->data); + dev_kfree_skb_any(skb); + return NULL; + } + } else { + page_ref_inc(bd->data); + qede_reuse_page(rxq, bd); + } + + /* We've consumed the first BD and prepared an SKB */ + qede_rx_bd_ring_consume(rxq); + + return skb; +} + +static struct sk_buff * +qede_rx_build_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *bd, u16 len, u16 pad) +{ + struct sk_buff *skb = NULL; + + /* For smaller frames still need to allocate skb, memcpy + * data and benefit in reusing the page segment instead of + * un-mapping it. + */ + if ((len + pad <= edev->rx_copybreak)) { + unsigned int offset = bd->page_offset + pad; + + skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, pad); + memcpy(skb_put(skb, len), + page_address(bd->data) + offset, len); + qede_reuse_page(rxq, bd); + goto out; + } + + skb = qede_build_skb(rxq, bd, len, pad); + + if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { + /* Incr page ref count to reuse on allocation failure so + * that it doesn't get freed while freeing SKB [as its + * already mapped there]. + */ + page_ref_inc(bd->data); + dev_kfree_skb_any(skb); + return NULL; + } +out: + /* We've consumed the first BD and prepared an SKB */ + qede_rx_bd_ring_consume(rxq); + + return skb; +} + static void qede_tpa_start(struct qede_dev *edev, struct qede_rx_queue *rxq, struct eth_fast_path_rx_tpa_start_cqe *cqe) { struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; - struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); - struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *replace_buf = &tpa_info->buffer; - dma_addr_t mapping = tpa_info->buffer_mapping; struct sw_rx_data *sw_rx_data_cons; - struct sw_rx_data *sw_rx_data_prod; + u16 pad; sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + pad = cqe->placement_offset + rxq->rx_headroom; - /* Use pre-allocated replacement buffer - we can't release the agg. - * start until its over and we don't want to risk allocation failing - * here, so re-allocate when aggregation will be over. 
- */ - sw_rx_data_prod->mapping = replace_buf->mapping; - - sw_rx_data_prod->data = replace_buf->data; - rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping)); - rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping)); - sw_rx_data_prod->page_offset = replace_buf->page_offset; - - rxq->sw_rx_prod++; + tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons, + le16_to_cpu(cqe->len_on_first_bd), + pad, false); + tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset; + tpa_info->buffer.mapping = sw_rx_data_cons->mapping; - /* move partial skb from cons to pool (don't unmap yet) - * save mapping, incase we drop the packet later on. - */ - tpa_info->buffer = *sw_rx_data_cons; - mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), - le32_to_cpu(rx_bd_cons->addr.lo)); - - tpa_info->buffer_mapping = mapping; - rxq->sw_rx_cons++; - - /* set tpa state to start only if we are able to allocate skb - * for this aggregation, otherwise mark as error and aggregation will - * be dropped - */ - tpa_info->skb = netdev_alloc_skb(edev->ndev, - le16_to_cpu(cqe->len_on_first_bd)); if (unlikely(!tpa_info->skb)) { DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); + + /* Consume from ring but do not produce since + * this might be used by FW still, it will be re-used + * at TPA end. + */ + tpa_info->tpa_start_fail = true; + qede_rx_bd_ring_consume(rxq); tpa_info->state = QEDE_AGG_STATE_ERROR; goto cons_buf; } - /* Start filling in the aggregation info */ - skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); tpa_info->frag_id = 0; tpa_info->state = QEDE_AGG_STATE_START; - /* Store some information from first CQE */ - tpa_info->start_cqe_placement_offset = cqe->placement_offset; - tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd); if ((le16_to_cpu(cqe->pars_flags.flags) >> PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) @@ -899,6 +964,10 @@ static int qede_tpa_end(struct qede_dev *edev, tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; skb = tpa_info->skb; + if (tpa_info->buffer.page_offset == PAGE_SIZE) + dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, + PAGE_SIZE, rxq->data_direction); + for (i = 0; cqe->len_list[i]; i++) qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, le16_to_cpu(cqe->len_list[i])); @@ -919,11 +988,6 @@ static int qede_tpa_end(struct qede_dev *edev, "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", le16_to_cpu(cqe->total_packet_len), skb->len); - memcpy(skb->data, - page_address(tpa_info->buffer.data) + - tpa_info->start_cqe_placement_offset + - tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len); - /* Finalize the SKB */ skb->protocol = eth_type_trans(skb, edev->ndev); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -940,6 +1004,12 @@ static int qede_tpa_end(struct qede_dev *edev, return 1; err: tpa_info->state = QEDE_AGG_STATE_NONE; + + if (tpa_info->tpa_start_fail) { + qede_reuse_page(rxq, &tpa_info->buffer); + tpa_info->tpa_start_fail = false; + } + dev_kfree_skb_any(tpa_info->skb); tpa_info->skb = NULL; return 0; @@ -1058,65 +1128,6 @@ static bool qede_rx_xdp(struct qede_dev *edev, return false; } -static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct sw_rx_data *bd, u16 len, - u16 pad) -{ - unsigned int offset = bd->page_offset + pad; - struct skb_frag_struct *frag; - struct page *page = bd->data; - unsigned int pull_len; - struct sk_buff *skb; - unsigned char *va; - - /* Allocate a new SKB with a sufficient large header len */ - skb = 
netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); - if (unlikely(!skb)) - return NULL; - - /* Copy data into SKB - if it's small, we can simply copy it and - * re-use the already allcoated & mapped memory. - */ - if (len + pad <= edev->rx_copybreak) { - skb_put_data(skb, page_address(page) + offset, len); - qede_reuse_page(rxq, bd); - goto out; - } - - frag = &skb_shinfo(skb)->frags[0]; - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page, offset, len, rxq->rx_buf_seg_size); - - va = skb_frag_address(frag); - pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); - - /* Align the pull_len to optimize memcpy */ - memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); - - /* Correct the skb & frag sizes offset after the pull */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { - /* Incr page ref count to reuse on allocation failure so - * that it doesn't get freed while freeing SKB [as its - * already mapped there]. - */ - page_ref_inc(page); - dev_kfree_skb_any(skb); - return NULL; - } - -out: - /* We've consumed the first BD and prepared an SKB */ - qede_rx_bd_ring_consume(rxq); - return skb; -} - static int qede_rx_build_jumbo(struct qede_dev *edev, struct qede_rx_queue *rxq, struct sk_buff *skb, @@ -1157,7 +1168,7 @@ static int qede_rx_build_jumbo(struct qede_dev *edev, PAGE_SIZE, DMA_FROM_DEVICE); skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, - bd->data, 0, cur_size); + bd->data, rxq->rx_headroom, cur_size); skb->truesize += PAGE_SIZE; skb->data_len += cur_size; @@ -1256,7 +1267,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev, /* Basic validation passed; Need to prepare an SKB. This would also * guarantee to finally consume the first BD upon success. 
*/ - skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad); + skb = qede_rx_build_skb(edev, rxq, bd, len, pad); if (!skb) { rxq->rx_alloc_errors++; qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 89c581c3c21a..40e2b923af39 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1197,30 +1197,8 @@ static void qede_free_rx_buffers(struct qede_dev *edev, } } -static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) -{ - int i; - - if (edev->gro_disable) - return; - - for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { - struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; - struct sw_rx_data *replace_buf = &tpa_info->buffer; - - if (replace_buf->data) { - dma_unmap_page(&edev->pdev->dev, - replace_buf->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); - __free_page(replace_buf->data); - } - } -} - static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { - qede_free_sge_mem(edev, rxq); - /* Free rx buffers */ qede_free_rx_buffers(edev, rxq); @@ -1232,45 +1210,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring); } -static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) +static void qede_set_tpa_param(struct qede_rx_queue *rxq) { - dma_addr_t mapping; int i; - if (edev->gro_disable) - return 0; - for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; - struct sw_rx_data *replace_buf = &tpa_info->buffer; - - replace_buf->data = alloc_pages(GFP_ATOMIC, 0); - if (unlikely(!replace_buf->data)) { - DP_NOTICE(edev, - "Failed to allocate TPA skb pool [replacement buffer]\n"); - goto err; - } - - mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, - PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { - DP_NOTICE(edev, - "Failed to map TPA replacement buffer\n"); - goto err; - } - replace_buf->mapping = mapping; - tpa_info->buffer.page_offset = 0; - tpa_info->buffer_mapping = mapping; tpa_info->state = QEDE_AGG_STATE_NONE; } - - return 0; -err: - qede_free_sge_mem(edev, rxq); - edev->gro_disable = 1; - edev->ndev->features &= ~NETIF_F_GRO_HW; - return -ENOMEM; } /* This function allocates all memory needed per Rx queue */ @@ -1281,19 +1229,24 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) rxq->num_rx_buffers = edev->q_num_rx_buffers; rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu; - rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0; + + rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD; + size = rxq->rx_headroom + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); /* Make sure that the headroom and payload fit in a single page */ - if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE) - rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom; + if (rxq->rx_buf_size + size > PAGE_SIZE) + rxq->rx_buf_size = PAGE_SIZE - size; - /* Segment size to spilt a page in multiple equal parts, + /* Segment size to spilt a page in multiple equal parts , * unless XDP is used in which case we'd use the entire page. 
*/ - if (!edev->xdp_prog) - rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size); - else + if (!edev->xdp_prog) { + size = size + rxq->rx_buf_size; + rxq->rx_buf_seg_size = roundup_pow_of_two(size); + } else { rxq->rx_buf_seg_size = PAGE_SIZE; + } /* Allocate the parallel driver ring for Rx buffers */ size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE; @@ -1337,7 +1290,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } } - rc = qede_alloc_sge_mem(edev, rxq); + if (!edev->gro_disable) + qede_set_tpa_param(rxq); err: return rc; } -- cgit v1.2.3 From 2652113ff043ca2ce1cb3be529b5ca9270c421d4 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 17 May 2018 13:07:43 -0700 Subject: net: ethernet: ti: Allow most drivers with COMPILE_TEST Most of the TI drivers build just fine with COMPILE_TEST, cpmac (AR7) is the exception because it uses a header file from arch/mips/include/asm/mach-ar7/ar7.h and keystone netcp which requires help from drivers/soc/ti/ for queue management helpers. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/Kconfig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 48a541eb0af2..9263d638bd6d 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_TI config TI_DAVINCI_EMAC tristate "TI DaVinci EMAC Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST select TI_DAVINCI_MDIO select TI_DAVINCI_CPDMA select PHYLIB @@ -30,7 +30,7 @@ config TI_DAVINCI_EMAC config TI_DAVINCI_MDIO tristate "TI DaVinci MDIO Support" - depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST select PHYLIB ---help--- This driver supports TI's DaVinci MDIO module. @@ -40,7 +40,7 @@ config TI_DAVINCI_MDIO config TI_DAVINCI_CPDMA tristate "TI DaVinci CPDMA Support" - depends on ARCH_DAVINCI || ARCH_OMAP2PLUS + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST ---help--- This driver supports TI's DaVinci CPDMA dma engine. @@ -60,7 +60,7 @@ config TI_CPSW_ALE config TI_CPSW tristate "TI CPSW Switch Support" - depends on ARCH_DAVINCI || ARCH_OMAP2PLUS + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO select TI_CPSW_PHY_SEL @@ -75,7 +75,7 @@ config TI_CPSW config TI_CPTS bool "TI Common Platform Time Sync (CPTS) Support" - depends on TI_CPSW || TI_KEYSTONE_NETCP + depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST depends on POSIX_TIMERS ---help--- This driver supports the Common Platform Time Sync unit of -- cgit v1.2.3 From 78cc6e7ef957037d7562e56d4463cf26da4e9e0e Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 17 May 2018 13:07:44 -0700 Subject: net: ethernet: freescale: Allow FEC with COMPILE_TEST The Freescale FEC driver builds fine with COMPILE_TEST, so make that possible. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/Kconfig | 2 +- drivers/net/ethernet/freescale/fec.h | 2 +- drivers/net/ethernet/freescale/fec_main.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 6e490fd2345d..a580a3dcbe59 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -22,7 +22,7 @@ if NET_VENDOR_FREESCALE config FEC tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)" depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ - ARCH_MXC || SOC_IMX28) + ARCH_MXC || SOC_IMX28 || COMPILE_TEST) default ARCH_MXC || SOC_IMX28 if ARM select PHYLIB imply PTP_1588_CLOCK diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index e7381f8ef89d..4778b663653e 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -21,7 +21,7 @@ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ - defined(CONFIG_ARM64) + defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) /* * Just figures, Motorola would have to change the offsets for * registers in the same peripheral device on different models diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index f3e43db0d6cb..4358f586e28f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2107,7 +2107,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) /* List of registers that can be safety be read to dump them with ethtool */ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ - defined(CONFIG_ARM64) + defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, -- cgit v1.2.3 From 3c0596f8bec016210e9f470411f70a1a8ea41bcd Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 17 May 2018 13:07:45 -0700 Subject: net: phy: Allow MDIO_MOXART and MDIO_SUN4I with COMPILE_TEST Those drivers build just fine with COMPILE_TEST, so make that possible. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 0e2305ccc91f..343989f9f9d9 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -118,7 +118,7 @@ config MDIO_I2C config MDIO_MOXART tristate "MOXA ART MDIO interface support" - depends on ARCH_MOXART + depends on ARCH_MOXART || COMPILE_TEST help This driver supports the MDIO interface found in the network interface units of the MOXA ART SoC @@ -142,7 +142,7 @@ config MDIO_OCTEON config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" - depends on ARCH_SUNXI + depends on ARCH_SUNXI || COMPILE_TEST help This driver supports the MDIO interface found in the network interface units of the Allwinner SoC that have an EMAC (A10, -- cgit v1.2.3 From 65360e5451121e493944776a7e33d1345c8483e6 Mon Sep 17 00:00:00 2001 From: Shahar Klein Date: Thu, 22 Mar 2018 11:56:51 +0200 Subject: net/mlx5: Properly handle a vport destination when setting FTE When creating FTE, properly distinguish between destination being vport or tir. The previous code just worked accidentally b/c of both dest being in the same offset within a union. Signed-off-by: Shahar Klein Reviewed-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index ef5afd7c9325..0bfce6a82c91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -372,6 +372,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { id = dst->dest_attr.ft->id; + } else if (dst->dest_attr.type == + MLX5_FLOW_DESTINATION_TYPE_VPORT) { + id = dst->dest_attr.vport_num; } else { id = dst->dest_attr.tir_num; } -- cgit v1.2.3 From b17f7fc10facca9e1b2b9cef39af0a8179796c14 Mon Sep 17 00:00:00 2001 From: Shahar Klein Date: Thu, 22 Mar 2018 12:32:12 +0200 Subject: net/mlx5: Add destination e-switch owner The destination e-switch owner allows a rule in namespace of one e-switch owner to point to a vport that is natively associated with another e-switch owner. 
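As a rough standalone illustration (userspace-only, made-up struct names standing in for mlx5_flow_destination), the layout change below shows why writing vport_num but reading tir_num used to work by accident, and why the richer vport member now forces callers to check the destination type:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct old_dest {
	int type;
	union {
		uint32_t tir_num;
		uint32_t vport_num;	/* shares its offset with tir_num */
	};
};

struct new_dest {
	int type;
	union {
		uint32_t tir_num;
		struct {
			uint16_t num;
			uint16_t vhca_id;
			bool vhca_id_valid;
		} vport;		/* no longer a plain u32 alias of tir_num */
	};
};

int main(void)
{
	struct old_dest od = { .vport_num = 7 };
	struct new_dest nd = { .vport = { .num = 7, .vhca_id = 3,
					  .vhca_id_valid = true } };

	/* old layout: reading tir_num for a vport destination still gives 7,
	 * which is why the wrong branch happened to program the right id
	 */
	printf("old: tir_num alias = %u\n", (unsigned)od.tir_num);

	/* new layout: the caller must look at the type and pick the member */
	printf("new: vport %u on vhca %u (valid=%d)\n",
	       (unsigned)nd.vport.num, (unsigned)nd.vport.vhca_id,
	       (int)nd.vport.vhca_id_valid);
	return 0;
}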
Signed-off-by: Shahar Klein Reviewed-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 8 +++++++- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- include/linux/mlx5/fs.h | 6 +++++- include/linux/mlx5/mlx5_ifc.h | 5 +++-- 7 files changed, 21 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index d93ff567b40d..b3820a34e773 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -235,7 +235,7 @@ const char *parse_fs_dst(struct trace_seq *p, switch (dst->type) { case MLX5_FLOW_DESTINATION_TYPE_VPORT: - trace_seq_printf(p, "vport=%u\n", dst->vport_num); + trace_seq_printf(p, "vport=%u\n", dst->vport.num); break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: trace_seq_printf(p, "ft=%p\n", dst->ft); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 332bc56306bf..9a24314b817a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -192,7 +192,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, } dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport_num = vport; + dest.vport.num = vport; esw_debug(esw->dev, "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index b123f8a52ad8..90c8cb31e633 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -71,7 +71,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[i].vport_num = attr->out_rep->vport; + dest[i].vport.num = attr->out_rep->vport; i++; } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { @@ -343,7 +343,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport_num = vport; + dest.vport.num = vport; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, @@ -387,7 +387,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dmac_c[0] = 0x01; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport_num = 0; + dest.vport.num = 0; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 0bfce6a82c91..5a00deff5457 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -374,7 +374,13 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, id = dst->dest_attr.ft->id; } else if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT) { - id = dst->dest_attr.vport_num; + id = 
dst->dest_attr.vport.num; + MLX5_SET(dest_format_struct, in_dests, + destination_eswitch_owner_vhca_id_valid, + dst->dest_attr.vport.vhca_id_valid); + MLX5_SET(dest_format_struct, in_dests, + destination_eswitch_owner_vhca_id, + dst->dest_attr.vport.vhca_id); } else { id = dst->dest_attr.tir_num; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index de51e7c39bc8..5a80279b052a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1404,7 +1404,7 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, { if (d1->type == d2->type) { if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT && - d1->vport_num == d2->vport_num) || + d1->vport.num == d2->vport.num) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && d1->ft == d2->ft) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR && diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 47aecc4fa8c2..9f4d32e41c06 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -90,8 +90,12 @@ struct mlx5_flow_destination { union { u32 tir_num; struct mlx5_flow_table *ft; - u32 vport_num; struct mlx5_fc *counter; + struct { + u16 num; + u16 vhca_id; + bool vhca_id_valid; + } vport; }; }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index ef15f751a984..3d17709bc30c 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1148,8 +1148,9 @@ enum mlx5_flow_destination_type { struct mlx5_ifc_dest_format_struct_bits { u8 destination_type[0x8]; u8 destination_id[0x18]; - - u8 reserved_at_20[0x20]; + u8 destination_eswitch_owner_vhca_id_valid[0x1]; + u8 reserved_at_21[0xf]; + u8 destination_eswitch_owner_vhca_id[0x10]; }; struct mlx5_ifc_flow_counter_list_bits { -- cgit v1.2.3 From 56e858df9f4b917cc49d78831a7f5692075bc78f Mon Sep 17 00:00:00 2001 From: Rabie Loulou Date: Sun, 18 Mar 2018 08:29:04 +0200 Subject: net/mlx5e: Explicitly set destination e-switch in FDB rules Set a specific destination e-switch when setting a destination vport. 
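A minimal userspace sketch of the intent (hypothetical helper and mock types, not the driver code): the forwarded-to vport is tagged with its owner's vhca_id only when a merged-eswitch capability says a peer e-switch may own it:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_dev { bool merged_eswitch; uint16_t vhca_id; };

struct mock_vport_dest {
	uint16_t num;
	uint16_t vhca_id;
	bool vhca_id_valid;
};

static void set_fwd_dest(struct mock_vport_dest *dest, uint16_t vport,
			 const struct mock_dev *esw_dev,
			 const struct mock_dev *out_dev)
{
	dest->num = vport;
	if (esw_dev->merged_eswitch) {
		/* destination may live on a peer e-switch: name its owner */
		dest->vhca_id = out_dev->vhca_id;
		dest->vhca_id_valid = true;
	}
}

int main(void)
{
	struct mock_dev esw = { .merged_eswitch = true, .vhca_id = 1 };
	struct mock_dev out = { .merged_eswitch = true, .vhca_id = 2 };
	struct mock_vport_dest dest = { 0 };

	set_fwd_dest(&dest, 5, &esw, &out);
	printf("vport %u, vhca_id %u (valid=%d)\n",
	       (unsigned)dest.num, (unsigned)dest.vhca_id,
	       (int)dest.vhca_id_valid);
	return 0;
}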
Signed-off-by: Rabie Loulou Reviewed-by: Or Gerlitz Reviewed-by: Roi Dayan Reviewed-by: Shahar Klein Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5 +++++ 3 files changed, 8 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 4197001f9801..880adc810ccc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -836,6 +836,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; attr->out_rep = rpriv->rep; + attr->out_mdev = out_priv->mdev; } err = mlx5_eswitch_add_vlan_action(esw, attr); @@ -2501,6 +2502,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, out_priv = netdev_priv(out_dev); rpriv = out_priv->ppriv; attr->out_rep = rpriv->rep; + attr->out_mdev = out_priv->mdev; } else if (encap) { parse_attr->mirred_ifindex = out_dev->ifindex; parse_attr->tun_info = *info; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 4cd773fa55e3..ac5db54823a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -230,6 +230,7 @@ enum { struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; struct mlx5_eswitch_rep *out_rep; + struct mlx5_core_dev *out_mdev; int action; __be16 vlan_proto; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 90c8cb31e633..ea93867d1ab4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -72,6 +72,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest[i].vport.num = attr->out_rep->vport; + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { + dest[i].vport.vhca_id = + MLX5_CAP_GEN(attr->out_mdev, vhca_id); + dest[i].vport.vhca_id_valid = 1; + } i++; } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { -- cgit v1.2.3 From 3e99df87722874c669f6c24a79bd1b4ba5fec819 Mon Sep 17 00:00:00 2001 From: Shahar Klein Date: Sun, 18 Mar 2018 09:02:06 +0200 Subject: net/mlx5: Add source e-switch owner The source e-switch owner allows a vport on one e-switch port be associated with a rule defined on the second port e-switch. The role of the source eswitch owner valid bit in the flow group is to allow the firmware fail driver attempts to wild card the source eswitch match field. If this bit is not set, the firmware ignores the source eswitch owner field totally. 
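Roughly, in standalone form (mock types, hypothetical field widths), the group-creation logic derives that valid bit from the match-criteria mask, so the field is only meaningful when the group really matches on it:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_match_criteria {
	uint16_t source_port;			/* mask: 0xffff = match on it */
	uint16_t source_eswitch_owner_vhca_id;	/* mask */
};

struct mock_flow_group {
	struct mock_match_criteria mask;
	bool source_eswitch_owner_vhca_id_valid;
};

static void create_group(struct mock_flow_group *fg,
			 const struct mock_match_criteria *mask)
{
	fg->mask = *mask;
	/* the valid bit follows the mask: non-zero mask => field is used */
	fg->source_eswitch_owner_vhca_id_valid =
		!!mask->source_eswitch_owner_vhca_id;
}

int main(void)
{
	struct mock_flow_group fg;
	struct mock_match_criteria mask = {
		.source_port = 0xffff,
		.source_eswitch_owner_vhca_id = 0xffff,
	};

	create_group(&fg, &mask);
	printf("owner-id valid bit: %d\n",
	       (int)fg.source_eswitch_owner_vhca_id_valid);
	return 0;
}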
Signed-off-by: Shahar Klein Reviewed-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 10 ++++++++++ include/linux/mlx5/mlx5_ifc.h | 6 ++++-- 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5a80279b052a..b1a2ca0ff320 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1372,6 +1372,8 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, struct mlx5_core_dev *dev = get_dev(&ft->node); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); void *match_criteria_addr; + u8 src_esw_owner_mask_on; + void *misc; int err; u32 *in; @@ -1384,6 +1386,14 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index); MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index + fg->max_ftes - 1); + + misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria, + misc_parameters); + src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + MLX5_SET(create_flow_group_in, in, + source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on); + match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); memcpy(match_criteria_addr, fg->mask.match_criteria, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 3d17709bc30c..9c3538f1b8b9 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -412,7 +412,7 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_at_0[0x8]; u8 source_sqn[0x18]; - u8 reserved_at_20[0x10]; + u8 source_eswitch_owner_vhca_id[0x10]; u8 source_port[0x10]; u8 outer_second_prio[0x3]; @@ -6995,7 +6995,9 @@ struct mlx5_ifc_create_flow_group_in_bits { u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_at_c0[0x20]; + u8 source_eswitch_owner_vhca_id_valid[0x1]; + + u8 reserved_at_c1[0x1f]; u8 start_flow_index[0x20]; -- cgit v1.2.3 From 10ff5359f883412728ba816046ee3a696625ca02 Mon Sep 17 00:00:00 2001 From: Shahar Klein Date: Sun, 18 Mar 2018 09:03:49 +0200 Subject: net/mlx5e: Explicitly set source e-switch in offloaded TC rules Set a specific source e-switch when setting a rule that matches on the ingress port. 
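Sketched standalone (mock spec, hypothetical field names), the ingress-port match fills both the value and the criteria halves, adding the owner id to each only under the merged-eswitch capability:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_misc { uint16_t source_port; uint16_t source_owner_vhca_id; };

struct mock_spec {
	struct mock_misc value;		/* what to match on */
	struct mock_misc criteria;	/* which bits matter */
};

static void match_ingress_port(struct mock_spec *spec, uint16_t vport,
			       uint16_t vhca_id, bool merged_eswitch)
{
	spec->value.source_port = vport;
	spec->criteria.source_port = 0xffff;

	if (merged_eswitch) {
		spec->value.source_owner_vhca_id = vhca_id;
		spec->criteria.source_owner_vhca_id = 0xffff;
	}
}

int main(void)
{
	struct mock_spec spec = { 0 };

	match_ingress_port(&spec, 3, 7, true);
	printf("match port %u owned by vhca %u\n",
	       (unsigned)spec.value.source_port,
	       (unsigned)spec.value.source_owner_vhca_id);
	return 0;
}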
Signed-off-by: Shahar Klein Reviewed-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 8 ++++++++ 3 files changed, 10 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 880adc810ccc..1d2ba687b902 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2462,6 +2462,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, memset(attr, 0, sizeof(*attr)); attr->in_rep = rpriv->rep; + attr->in_mdev = priv->mdev; tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index ac5db54823a1..98a306e02640 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -231,6 +231,7 @@ struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; struct mlx5_eswitch_rep *out_rep; struct mlx5_core_dev *out_mdev; + struct mlx5_core_dev *in_mdev; int action; __be16 vlan_proto; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index ea93867d1ab4..6c83eef5141a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -93,8 +93,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(attr->in_mdev, vhca_id)); + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS; -- cgit v1.2.3 From b1d90e6bbd149fe37b871d690d68aab4137a8987 Mon Sep 17 00:00:00 2001 From: Rabie Loulou Date: Thu, 11 Jan 2018 10:34:32 +0200 Subject: net/mlx5e: Offload TC eswitch rules for VFs belonging to different PFs When the merged eswitch capability is supported, allow offloading rules between VFs which belong to different PFs (and hence have different eswitch affinity). 
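A standalone sketch of the peer check (mock types; field names merely stand in for the netdev_ops, same_hw_devs() and SRIOV-offloads-mode tests): the decision whether a mirred target on another PF is offloadable boils down to a predicate along these lines:

#include <stdbool.h>
#include <stdio.h>

struct mock_priv {
	bool merged_eswitch_cap;
	int hw_id;			/* same_hw_devs() stand-in */
	const void *netdev_ops;		/* same-driver identity stand-in */
	bool offloads_mode;		/* SRIOV_OFFLOADS stand-in */
};

static bool is_merged_eswitch_peer(const struct mock_priv *in,
				   const struct mock_priv *out)
{
	return in->merged_eswitch_cap &&
	       in->netdev_ops == out->netdev_ops &&	/* same driver */
	       in->hw_id == out->hw_id &&		/* same physical HW */
	       out->offloads_mode;			/* peer is in offloads mode */
}

int main(void)
{
	static const int ops = 0;	/* shared "netdev_ops" identity */
	struct mock_priv a = { true, 42, &ops, true };
	struct mock_priv b = { true, 42, &ops, true };

	printf("offloadable across PFs: %d\n",
	       (int)is_merged_eswitch_peer(&a, &b));
	return 0;
}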
Signed-off-by: Rabie Loulou Reviewed-by: Or Gerlitz Reviewed-by: Roi Dayan Reviewed-by: Shahar Klein Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 630dd6dcabb9..77c3f8b8ae96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2077,6 +2077,20 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, return 0; } +static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, + struct net_device *peer_netdev) +{ + struct mlx5e_priv *peer_priv; + + peer_priv = netdev_priv(peer_netdev); + + return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && + (priv->netdev->netdev_ops == peer_netdev->netdev_ops) && + same_hw_devs(priv, peer_priv) && + MLX5_VPORT_MANAGER(peer_priv->mdev) && + (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS)); +} + static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct net_device **out_dev, @@ -2535,7 +2549,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, out_dev = tcf_mirred_dev(a); if (switchdev_port_same_parent_id(priv->netdev, - out_dev)) { + out_dev) || + is_merged_eswitch_dev(priv, out_dev)) { action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; out_priv = netdev_priv(out_dev); -- cgit v1.2.3 From 60bd4af814fec164c42bdd2efd7984b85d6b1e1e Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 18 Apr 2018 13:45:11 +0300 Subject: net/mlx5e: Add ingress/egress indication for offloaded TC flows When an e-switch TC rule is offloaded through the egdev (egress device) mechanism, we treat this as egress, all other cases (NIC and e-switch) are considred ingress. This is preparation step that will allow us to identify "wrong" stat/del offload calls made by the TC core on egdev based flows and ignore them. 
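In standalone form (mock flags and types), the direction plumbing amounts to storing the ingress/egress hint on the flow at add time and comparing it against the hint carried by later stat/del calls:

#include <stdbool.h>
#include <stdio.h>

#define TC_INGRESS	(1 << 0)
#define TC_EGRESS	(1 << 1)
#define DIRECTION_MASK	(TC_INGRESS | TC_EGRESS)

struct mock_flow {
	unsigned long cookie;
	int flags;		/* direction bits recorded when the flow was added */
};

static bool same_flow_direction(const struct mock_flow *flow, int flags)
{
	return (flow->flags & DIRECTION_MASK) == (flags & DIRECTION_MASK);
}

int main(void)
{
	struct mock_flow flow = { .cookie = 0x1234, .flags = TC_INGRESS };

	/* a del/stats request arriving through the egdev path carries the
	 * egress hint and therefore does not touch the ingress-owned flow
	 */
	printf("ingress del accepted: %d\n",
	       (int)same_flow_direction(&flow, TC_INGRESS));
	printf("egress del accepted:  %d\n",
	       (int)same_flow_direction(&flow, TC_EGRESS));
	return 0;
}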
Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Reviewed-by: Paul Blakey Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 -- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 15 ++++----- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 32 ++++++++++++++----- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 38 +++++++++++++++++------ drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 13 ++++++-- 5 files changed, 70 insertions(+), 31 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7c930088e96e..51a1d36a56c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1118,9 +1118,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash); -int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv); - /* mlx5e generic netdev management API */ struct net_device* mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 417bf2e8ab85..27e8375a476b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3136,22 +3136,23 @@ out: #ifdef CONFIG_MLX5_ESWITCH static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *cls_flower) + struct tc_cls_flower_offload *cls_flower, + int flags) { switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, cls_flower); + return mlx5e_configure_flower(priv, cls_flower, flags); case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, cls_flower); + return mlx5e_delete_flower(priv, cls_flower, flags); case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, cls_flower); + return mlx5e_stats_flower(priv, cls_flower, flags); default: return -EOPNOTSUPP; } } -int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) +static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) { struct mlx5e_priv *priv = cb_priv; @@ -3160,7 +3161,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_setup_tc_cls_flower(priv, type_data); + return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a689f4c90fe3..182b636552a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -723,15 +723,31 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, static int mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *cls_flower) + struct tc_cls_flower_offload *cls_flower, int flags) { switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, cls_flower); + return mlx5e_configure_flower(priv, cls_flower, flags); case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, cls_flower); + return mlx5e_delete_flower(priv, cls_flower, flags); case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, cls_flower); + return mlx5e_stats_flower(priv, cls_flower, 
flags); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct mlx5e_priv *priv = cb_priv; + + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS); default: return -EOPNOTSUPP; } @@ -747,7 +763,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(priv, type_data); + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); default: return -EOPNOTSUPP; } @@ -1107,7 +1123,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH); upriv = netdev_priv(uplink_rpriv->netdev); - err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb, + err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev, upriv); if (err) goto err_neigh_cleanup; @@ -1122,7 +1138,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) return 0; err_egdev_cleanup: - tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb, + tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev, upriv); err_neigh_cleanup: @@ -1151,7 +1167,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH); upriv = netdev_priv(uplink_rpriv->netdev); - tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb, + tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev, upriv); mlx5e_rep_neigh_cleanup(rpriv); mlx5e_detach_netdev(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 77c3f8b8ae96..26a1312ec9f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -62,12 +62,16 @@ struct mlx5_nic_flow_attr { struct mlx5_flow_table *hairpin_ft; }; +#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1) + enum { - MLX5E_TC_FLOW_ESWITCH = BIT(0), - MLX5E_TC_FLOW_NIC = BIT(1), - MLX5E_TC_FLOW_OFFLOADED = BIT(2), - MLX5E_TC_FLOW_HAIRPIN = BIT(3), - MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4), + MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS, + MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS, + MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE), + MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1), + MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2), + MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3), + MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4), }; struct mlx5e_tc_flow { @@ -2618,8 +2622,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return 0; } +static void get_flags(int flags, u8 *flow_flags) +{ + u8 __flow_flags = 0; + + if (flags & MLX5E_TC_INGRESS) + __flow_flags |= MLX5E_TC_FLOW_INGRESS; + if (flags & MLX5E_TC_EGRESS) + __flow_flags |= MLX5E_TC_FLOW_EGRESS; + + *flow_flags = __flow_flags; +} + int mlx5e_configure_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f, int flags) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; @@ -2628,11 +2644,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, int attr_size, err = 0; u8 flow_flags = 0; + get_flags(flags, &flow_flags); + if (esw && 
esw->mode == SRIOV_OFFLOADS) { - flow_flags = MLX5E_TC_FLOW_ESWITCH; + flow_flags |= MLX5E_TC_FLOW_ESWITCH; attr_size = sizeof(struct mlx5_esw_flow_attr); } else { - flow_flags = MLX5E_TC_FLOW_NIC; + flow_flags |= MLX5E_TC_FLOW_NIC; attr_size = sizeof(struct mlx5_nic_flow_attr); } @@ -2691,7 +2709,7 @@ err_free: } int mlx5e_delete_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f, int flags) { struct mlx5e_tc_flow *flow; struct mlx5e_tc_table *tc = &priv->fs.tc; @@ -2711,7 +2729,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, } int mlx5e_stats_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f, int flags) { struct mlx5e_tc_table *tc = &priv->fs.tc; struct mlx5e_tc_flow *flow; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index c14c263a739b..2255345c2e18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -38,16 +38,23 @@ #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff #ifdef CONFIG_MLX5_ESWITCH + +enum { + MLX5E_TC_INGRESS = BIT(0), + MLX5E_TC_EGRESS = BIT(1), + MLX5E_TC_LAST_EXPORTED_BIT = 1, +}; + int mlx5e_tc_init(struct mlx5e_priv *priv); void mlx5e_tc_cleanup(struct mlx5e_priv *priv); int mlx5e_configure_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f); + struct tc_cls_flower_offload *f, int flags); int mlx5e_delete_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f); + struct tc_cls_flower_offload *f, int flags); int mlx5e_stats_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f); + struct tc_cls_flower_offload *f, int flags); struct mlx5e_encap_entry; void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, -- cgit v1.2.3 From 05866c82360aef6005de1920d585ae604309e732 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 10 Apr 2018 14:27:43 +0300 Subject: net/mlx5e: Prepare for shared table to keep TC eswitch flows This is a refactoring step to be able and store the hash table which keeps track of offloaded TC flows in a different location for NIC vs e-switch rules. 
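A toy standalone model of such a table (a linked list stands in for the rhashtable, names are made up): flows are keyed by the TC cookie, so a lookup before insert is enough to spot a repeated offload offer for the same flow and ignore it:

#include <stdio.h>
#include <stdlib.h>

struct mock_flow {
	unsigned long cookie;	/* hash key, like tc_ht_params.key_offset */
	struct mock_flow *next;
};

static struct mock_flow *tc_ht;	/* single shared table (list stand-in) */

static struct mock_flow *flow_lookup(unsigned long cookie)
{
	struct mock_flow *f;

	for (f = tc_ht; f; f = f->next)
		if (f->cookie == cookie)
			return f;
	return NULL;
}

static int flow_add(unsigned long cookie)
{
	struct mock_flow *f;

	if (flow_lookup(cookie))
		return 0;	/* already offloaded: ignore the repeat */

	f = calloc(1, sizeof(*f));
	if (!f)
		return -1;
	f->cookie = cookie;
	f->next = tc_ht;
	tc_ht = f;
	return 0;
}

int main(void)
{
	flow_add(0xabcd);
	flow_add(0xabcd);	/* duplicate offer, silently ignored */
	printf("flow 0xabcd present: %s\n",
	       flow_lookup(0xabcd) ? "yes" : "no");
	return 0;
}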
Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Reviewed-by: Paul Blakey Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 39 +++++++++++++------------ 2 files changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 51a1d36a56c5..bc91a7335c93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -634,7 +634,6 @@ struct mlx5e_flow_table { struct mlx5e_tc_table { struct mlx5_flow_table *t; - struct rhashtable_params ht_params; struct rhashtable ht; DECLARE_HASHTABLE(mod_hdr_tbl, 8); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 26a1312ec9f8..1c90586d7f58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2634,12 +2634,24 @@ static void get_flags(int flags, u8 *flow_flags) *flow_flags = __flow_flags; } +static const struct rhashtable_params tc_ht_params = { + .head_offset = offsetof(struct mlx5e_tc_flow, node), + .key_offset = offsetof(struct mlx5e_tc_flow, cookie), + .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie), + .automatic_shrinking = true, +}; + +static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv) +{ + return &priv->fs.tc.ht; +} + int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5e_tc_table *tc = &priv->fs.tc; + struct rhashtable *tc_ht = get_tc_ht(priv); struct mlx5e_tc_flow *flow; int attr_size, err = 0; u8 flow_flags = 0; @@ -2693,8 +2705,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) kvfree(parse_attr); - err = rhashtable_insert_fast(&tc->ht, &flow->node, - tc->ht_params); + err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params); if (err) { mlx5e_tc_del_flow(priv, flow); kfree(flow); @@ -2711,15 +2722,14 @@ err_free: int mlx5e_delete_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags) { + struct rhashtable *tc_ht = get_tc_ht(priv); struct mlx5e_tc_flow *flow; - struct mlx5e_tc_table *tc = &priv->fs.tc; - flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, - tc->ht_params); + flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); if (!flow) return -EINVAL; - rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); + rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params); mlx5e_tc_del_flow(priv, flow); @@ -2731,15 +2741,14 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, int mlx5e_stats_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags) { - struct mlx5e_tc_table *tc = &priv->fs.tc; + struct rhashtable *tc_ht = get_tc_ht(priv); struct mlx5e_tc_flow *flow; struct mlx5_fc *counter; u64 bytes; u64 packets; u64 lastuse; - flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, - tc->ht_params); + flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); if (!flow) return -EINVAL; @@ -2757,13 +2766,6 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, return 0; } -static const struct rhashtable_params mlx5e_tc_flow_ht_params = { - .head_offset = offsetof(struct mlx5e_tc_flow, node), - .key_offset = offsetof(struct mlx5e_tc_flow, cookie), - .key_len = sizeof(((struct 
mlx5e_tc_flow *)0)->cookie), - .automatic_shrinking = true, -}; - int mlx5e_tc_init(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = &priv->fs.tc; @@ -2771,8 +2773,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv) hash_init(tc->mod_hdr_tbl); hash_init(tc->hairpin_tbl); - tc->ht_params = mlx5e_tc_flow_ht_params; - return rhashtable_init(&tc->ht, &tc->ht_params); + return rhashtable_init(&tc->ht, &tc_ht_params); } static void _mlx5e_tc_del_flow(void *ptr, void *arg) -- cgit v1.2.3 From 655dc3d2b91bf241f5baca5eb2bc2b1e22a561ff Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 10 Apr 2018 18:34:36 +0300 Subject: net/mlx5e: Use shared table for offloaded TC eswitch flows Currently, each representor netdev use their own hash table to keep the mapping from TC flow (f->cookie) to the driver offloaded instance. The table is the one which originally was added for offloading TC NIC (not eswitch) rules. This scheme breaks when the core TC code calls us to add the same flow twice, (e.g under egdev use case) since we don't spot that and offload a 2nd flow into the HW with the wrong source vport. As a pre-step to solve that, we move to use a single table which keeps all offloaded TC eswitch flows. The table is located at the eswitch uplink representor object. Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Reviewed-by: Paul Blakey Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 19 +++++++-------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 29 +++++++++++++++++++---- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 11 +++++---- 5 files changed, 43 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 27e8375a476b..b5a7580b12fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4462,7 +4462,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) goto err_destroy_direct_tirs; } - err = mlx5e_tc_init(priv); + err = mlx5e_tc_nic_init(priv); if (err) goto err_destroy_flow_steering; @@ -4483,7 +4483,7 @@ err_destroy_indirect_rqts: static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { - mlx5e_tc_cleanup(priv); + mlx5e_tc_nic_cleanup(priv); mlx5e_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_indirect_tirs(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 182b636552a6..aa32592a54cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -981,14 +981,8 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) } rpriv->vport_rx_rule = flow_rule; - err = mlx5e_tc_init(priv); - if (err) - goto err_del_flow_rule; - return 0; -err_del_flow_rule: - mlx5_del_flow_rules(rpriv->vport_rx_rule); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_direct_rqts: @@ -1000,7 +994,6 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - mlx5e_tc_cleanup(priv); mlx5_del_flow_rules(rpriv->vport_rx_rule); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_direct_rqts(priv); @@ -1058,8 +1051,15 @@ mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) if (err) goto err_remove_sqs; + /* init shared tc flow table */ 
+ err = mlx5e_tc_esw_init(&rpriv->tc_ht); + if (err) + goto err_neigh_cleanup; + return 0; +err_neigh_cleanup: + mlx5e_rep_neigh_cleanup(rpriv); err_remove_sqs: mlx5e_remove_sqs_fwd_rules(priv); return err; @@ -1074,9 +1074,8 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep) if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_remove_sqs_fwd_rules(priv); - /* clean (and re-init) existing uplink offloaded TC rules */ - mlx5e_tc_cleanup(priv); - mlx5e_tc_init(priv); + /* clean uplink offloaded TC rules, delete shared tc flow table */ + mlx5e_tc_esw_cleanup(&rpriv->tc_ht); mlx5e_rep_neigh_cleanup(rpriv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index b9b481f2833a..844d32d5c29f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -59,6 +59,7 @@ struct mlx5e_rep_priv { struct net_device *netdev; struct mlx5_flow_handle *vport_rx_rule; struct list_head vport_sqs_list; + struct rhashtable tc_ht; /* valid for uplink rep */ }; static inline diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 1c90586d7f58..05c90b4f8a31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -76,6 +76,7 @@ enum { struct mlx5e_tc_flow { struct rhash_head node; + struct mlx5e_priv *priv; u64 cookie; u8 flags; struct mlx5_flow_handle *rule; @@ -2643,7 +2644,14 @@ static const struct rhashtable_params tc_ht_params = { static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv) { - return &priv->fs.tc.ht; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *uplink_rpriv; + + if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) { + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + return &uplink_rpriv->tc_ht; + } else + return &priv->fs.tc.ht; } int mlx5e_configure_flower(struct mlx5e_priv *priv, @@ -2675,6 +2683,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, flow->cookie = f->cookie; flow->flags = flow_flags; + flow->priv = priv; err = parse_cls_flower(priv, flow, &parse_attr->spec, f); if (err < 0) @@ -2766,7 +2775,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, return 0; } -int mlx5e_tc_init(struct mlx5e_priv *priv) +int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = &priv->fs.tc; @@ -2779,20 +2788,30 @@ int mlx5e_tc_init(struct mlx5e_priv *priv) static void _mlx5e_tc_del_flow(void *ptr, void *arg) { struct mlx5e_tc_flow *flow = ptr; - struct mlx5e_priv *priv = arg; + struct mlx5e_priv *priv = flow->priv; mlx5e_tc_del_flow(priv, flow); kfree(flow); } -void mlx5e_tc_cleanup(struct mlx5e_priv *priv) +void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = &priv->fs.tc; - rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv); + rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL); if (!IS_ERR_OR_NULL(tc->t)) { mlx5_destroy_flow_table(tc->t); tc->t = NULL; } } + +int mlx5e_tc_esw_init(struct rhashtable *tc_ht) +{ + return rhashtable_init(tc_ht, &tc_ht_params); +} + +void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) +{ + rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 2255345c2e18..59e52b845beb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -45,8 +45,11 @@ enum { MLX5E_TC_LAST_EXPORTED_BIT = 1, }; -int mlx5e_tc_init(struct mlx5e_priv *priv); -void mlx5e_tc_cleanup(struct mlx5e_priv *priv); +int mlx5e_tc_nic_init(struct mlx5e_priv *priv); +void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); + +int mlx5e_tc_esw_init(struct rhashtable *tc_ht); +void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht); int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags); @@ -71,8 +74,8 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) } #else /* CONFIG_MLX5_ESWITCH */ -static inline int mlx5e_tc_init(struct mlx5e_priv *priv) { return 0; } -static inline void mlx5e_tc_cleanup(struct mlx5e_priv *priv) {} +static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; } #endif -- cgit v1.2.3 From 8f8ae8953fb34ac01723f1dae5b231f64a3c526b Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 10 Apr 2018 19:24:51 +0300 Subject: net/mlx5e: Ignore attempts to offload multiple times a TC flow For VF->VF and uplink->VF rules, the TC core (cls_api) attempts to offload the same flow multiple times into the driver, b/c we registered to the egdev callback. Use the flow cookie to ignore attempts to add such flows, we can't reject them (return error), b/c this will fail the offload attempt, so we ignore that. We indentify wrong stat/del calls using the flow ingress/egress flags, here we do return error to the core. Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Reviewed-by: Paul Blakey Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 05c90b4f8a31..674f1d7d2737 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2666,6 +2666,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, get_flags(flags, &flow_flags); + flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); + if (flow) { + netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie); + return 0; + } + if (esw && esw->mode == SRIOV_OFFLOADS) { flow_flags |= MLX5E_TC_FLOW_ESWITCH; attr_size = sizeof(struct mlx5_esw_flow_attr); @@ -2728,6 +2734,17 @@ err_free: return err; } +#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS) +#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS) + +static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags) +{ + if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK)) + return true; + + return false; +} + int mlx5e_delete_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags) { @@ -2735,7 +2752,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); - if (!flow) + if (!flow || !same_flow_direction(flow, flags)) return -EINVAL; rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params); @@ -2758,7 +2775,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, u64 lastuse; flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); - if (!flow) + if (!flow || !same_flow_direction(flow, flags)) return 
-EINVAL; if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED)) -- cgit v1.2.3 From a228060a7c9ab88597eeac131e4578595d5d46ae Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 30 Apr 2018 11:42:26 +0300 Subject: net/mlx5e: Add HW vport counters to representor ethtool stats Currently the representor only report the SW (slow-path) traffic counters. Add packet/bytes reporting of the HW counters, which account for the total amount of traffic that was handled by the vport, both slow and fast (offloaded) paths. The newly exposed counters are named vport_rx/tx_packets/bytes. Signed-off-by: Or Gerlitz Signed-off-by: Adi Nissim Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 35 ++++++++++++++++++++---- 1 file changed, 29 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index aa32592a54cb..c3034f58aa33 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -66,18 +66,36 @@ static const struct counter_desc sw_rep_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) }, }; -#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc) +struct vport_stats { + u64 vport_rx_packets; + u64 vport_tx_packets; + u64 vport_rx_bytes; + u64 vport_tx_bytes; +}; + +static const struct counter_desc vport_rep_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) }, + { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) }, + { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) }, + { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) }, +}; + +#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc) +#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc) static void mlx5e_rep_get_strings(struct net_device *dev, u32 stringset, uint8_t *data) { - int i; + int i, j; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++) + for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) strcpy(data + (i * ETH_GSTRING_LEN), sw_rep_stats_desc[i].format); + for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++) + strcpy(data + (i * ETH_GSTRING_LEN), + vport_rep_stats_desc[j].format); break; } } @@ -140,7 +158,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct mlx5e_priv *priv = netdev_priv(dev); - int i; + int i, j; if (!data) return; @@ -148,18 +166,23 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_rep_update_sw_counters(priv); + mlx5e_rep_update_hw_counters(priv); mutex_unlock(&priv->state_lock); - for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++) + for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_rep_stats_desc, i); + + for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++) + data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport, + vport_rep_stats_desc, j); } static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: - return NUM_VPORT_REP_COUNTERS; + return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS; default: return -EOPNOTSUPP; } -- cgit v1.2.3 From 67e1c4068d325d2a15a41c4c1509a69598f693f1 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:55:58 +0100 Subject: net: stmmac: Enable OSP for GMAC4 This enables OSP (Operate on Second Packet) 
for GMAC4. The feature allows DMA to fetch second descriptor while its still processing the first one. Running iperf, the performance gain is +/- 38%. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 117c3a5288f0..9aab5b35e2d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -94,6 +94,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr, value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT); + + /* Enable OSP to get best performance */ + value |= DMA_CONTROL_OSP; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); -- cgit v1.2.3 From 4ae0169fd1b3c792b66be58995b7e6b629919ecf Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:55:59 +0100 Subject: net: stmmac: Do not keep rearming the coalesce timer in stmmac_xmit This is cutting down performance. Once the timer is armed it should run after the time expires for the first packet sent and not the last one. After this change, running iperf, the performance gain is +/- 24%. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 42fc76e76bf9..4d425b1a0c59 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -105,6 +105,7 @@ struct stmmac_priv { u32 tx_count_frames; u32 tx_coal_frames; u32 tx_coal_timer; + bool tx_timer_armed; int tx_coalesce; int hwts_tx_en; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d9dbe1355896..789bc226ba69 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3158,13 +3158,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) * element in case of no SG. */ priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { + if (likely(priv->tx_coal_frames > priv->tx_count_frames) && + !priv->tx_timer_armed) { mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); + priv->tx_timer_armed = true; } else { priv->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; + priv->tx_timer_armed = false; } skb_tx_timestamp(skb); -- cgit v1.2.3 From 6844171d5b0567a18248593176e23ae97d76b346 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:00 +0100 Subject: net: stmmac: Let descriptor code set skbuff address Stop using if conditions depending on the GMAC version for setting the the descriptor skbuff address and use instead a helper implemented in the descriptor files. Signed-off-by: Jose Abreu Cc: David S. 
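A minimal stand-alone sketch of the dispatch pattern this patch moves to: the buffer-address write becomes a per-variant descriptor callback chosen once at probe time instead of a per-packet core-version test. Types, names and layouts below are simplified stand-ins, not the actual stmmac structures.

#include <stdint.h>
#include <stdio.h>

struct dma_desc { uint32_t des0, des1, des2, des3; };

struct desc_ops {
	/* set descriptor buffer address */
	void (*set_addr)(struct dma_desc *p, uint32_t addr);
};

/* GMAC4-style layout keeps the buffer address in des0 */
static void gmac4_set_addr(struct dma_desc *p, uint32_t addr)
{
	p->des0 = addr;
	p->des1 = 0;
}

/* older descriptor layouts keep it in des2 */
static void ndesc_set_addr(struct dma_desc *p, uint32_t addr)
{
	p->des2 = addr;
}

static const struct desc_ops gmac4_ops = { .set_addr = gmac4_set_addr };
static const struct desc_ops norm_ops  = { .set_addr = ndesc_set_addr };

int main(void)
{
	struct dma_desc d = { 0, 0, 0, 0 };
	/* the ops pointer is picked once, at probe time */
	const struct desc_ops *ops = &gmac4_ops;

	ops->set_addr(&d, 0x1000);	/* no per-packet core-version check */
	printf("gmac4: des0=0x%x des2=0x%x\n", (unsigned)d.des0, (unsigned)d.des2);

	ops = &norm_ops;
	ops->set_addr(&d, 0x2000);
	printf("norm:  des0=0x%x des2=0x%x\n", (unsigned)d.des0, (unsigned)d.des2);
	return 0;
}

The follow-up patches below extend the same ops structure with clear() and get_addr() hooks.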
Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 7 ++++++ drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 6 ++++++ drivers/net/ethernet/stmicro/stmmac/hwif.h | 4 ++++ drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 6 ++++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 25 ++++++---------------- 5 files changed, 29 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 65ed896c13cb..f67caa14b2f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -424,6 +424,12 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss) p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV); } +static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des0 = cpu_to_le32(addr); + p->des1 = 0; +} + const struct stmmac_desc_ops dwmac4_desc_ops = { .tx_status = dwmac4_wrback_get_tx_status, .rx_status = dwmac4_wrback_get_rx_status, @@ -445,6 +451,7 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { .init_tx_desc = dwmac4_rd_init_tx_desc, .display_ring = dwmac4_display_ring, .set_mss = dwmac4_set_mss_ctxt, + .set_addr = dwmac4_set_addr, }; const struct stmmac_mode_ops dwmac4_ring_mode_ops = { }; diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 3bfb3f584be2..02749e4b1a47 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -437,6 +437,11 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx) pr_info("\n"); } +static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(addr); +} + const struct stmmac_desc_ops enh_desc_ops = { .tx_status = enh_desc_get_tx_status, .rx_status = enh_desc_get_rx_status, @@ -457,4 +462,5 @@ const struct stmmac_desc_ops enh_desc_ops = { .get_timestamp = enh_desc_get_timestamp, .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, .display_ring = enh_desc_display_ring, + .set_addr = enh_desc_set_addr, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index b7539a14e6ad..d66d194d5f89 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -79,6 +79,8 @@ struct stmmac_desc_ops { void (*display_ring)(void *head, unsigned int size, bool rx); /* set MSS via context descriptor */ void (*set_mss)(struct dma_desc *p, unsigned int mss); + /* set descriptor skbuff address */ + void (*set_addr)(struct dma_desc *p, dma_addr_t addr); }; #define stmmac_init_rx_desc(__priv, __args...) \ @@ -123,6 +125,8 @@ struct stmmac_desc_ops { stmmac_do_void_callback(__priv, desc, display_ring, __args) #define stmmac_set_mss(__priv, __args...) \ stmmac_do_void_callback(__priv, desc, set_mss, __args) +#define stmmac_set_desc_addr(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, desc, set_addr, __args) struct stmmac_dma_cfg; struct dma_features; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 7b1d901bf5bc..6cf2c7c5208a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -297,6 +297,11 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx) pr_info("\n"); } +static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(addr); +} + const struct stmmac_desc_ops ndesc_ops = { .tx_status = ndesc_get_tx_status, .rx_status = ndesc_get_rx_status, @@ -316,4 +321,5 @@ const struct stmmac_desc_ops ndesc_ops = { .get_timestamp = ndesc_get_timestamp, .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, .display_ring = ndesc_display_ring, + .set_addr = ndesc_set_addr, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 789bc226ba69..3f559d72b41f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1156,10 +1156,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, return -EINVAL; } - if (priv->synopsys_id >= DWMAC_CORE_4_00) - p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); - else - p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); + stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]); if (priv->dma_buf_sz == BUF_SIZE_16KiB) stmmac_init_desc3(priv, p); @@ -3100,10 +3097,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) goto dma_map_err; /* should reuse desc w/o issues */ tx_q->tx_skbuff_dma[entry].buf = des; - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - desc->des0 = cpu_to_le32(des); - else - desc->des2 = cpu_to_le32(des); + + stmmac_set_desc_addr(priv, desc, des); tx_q->tx_skbuff_dma[entry].map_as_page = true; tx_q->tx_skbuff_dma[entry].len = len; @@ -3185,10 +3180,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) goto dma_map_err; tx_q->tx_skbuff_dma[first_entry].buf = des; - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - first->des0 = cpu_to_le32(des); - else - first->des2 = cpu_to_le32(des); + + stmmac_set_desc_addr(priv, first, des); tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; @@ -3302,13 +3295,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) break; } - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { - p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); - p->des1 = 0; - } else { - p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); - } - + stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]); stmmac_refill_desc3(priv, rx_q, p); if (rx_q->rx_zeroc_thresh > 0) -- cgit v1.2.3 From 44c67f8559fc5e2f818cbb11fde1ee1bebc59e7c Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:01 +0100 Subject: net: stmmac: Let descriptor code clear the descriptor Stop using if conditions depending on the GMAC version for clearing the descriptor and use instead a helper implemented in the descriptor files. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 9 +++++++++ drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 6 ++++++ drivers/net/ethernet/stmicro/stmmac/hwif.h | 4 ++++ drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 6 ++++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9 +-------- 5 files changed, 26 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index f67caa14b2f5..119a2f93dd55 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -430,6 +430,14 @@ static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr) p->des1 = 0; } +static void dwmac4_clear(struct dma_desc *p) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + const struct stmmac_desc_ops dwmac4_desc_ops = { .tx_status = dwmac4_wrback_get_tx_status, .rx_status = dwmac4_wrback_get_rx_status, @@ -452,6 +460,7 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { .display_ring = dwmac4_display_ring, .set_mss = dwmac4_set_mss_ctxt, .set_addr = dwmac4_set_addr, + .clear = dwmac4_clear, }; const struct stmmac_mode_ops dwmac4_ring_mode_ops = { }; diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 02749e4b1a47..17cd26fb2a3d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -442,6 +442,11 @@ static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr) p->des2 = cpu_to_le32(addr); } +static void enh_desc_clear(struct dma_desc *p) +{ + p->des2 = 0; +} + const struct stmmac_desc_ops enh_desc_ops = { .tx_status = enh_desc_get_tx_status, .rx_status = enh_desc_get_rx_status, @@ -463,4 +468,5 @@ const struct stmmac_desc_ops enh_desc_ops = { .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, .display_ring = enh_desc_display_ring, .set_addr = enh_desc_set_addr, + .clear = enh_desc_clear, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index d66d194d5f89..a6b9c9790d11 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -81,6 +81,8 @@ struct stmmac_desc_ops { void (*set_mss)(struct dma_desc *p, unsigned int mss); /* set descriptor skbuff address */ void (*set_addr)(struct dma_desc *p, dma_addr_t addr); + /* clear descriptor */ + void (*clear)(struct dma_desc *p); }; #define stmmac_init_rx_desc(__priv, __args...) \ @@ -127,6 +129,8 @@ struct stmmac_desc_ops { stmmac_do_void_callback(__priv, desc, set_mss, __args) #define stmmac_set_desc_addr(__priv, __args...) \ stmmac_do_void_callback(__priv, desc, set_addr, __args) +#define stmmac_clear_desc(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, desc, clear, __args) struct stmmac_dma_cfg; struct dma_features; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 6cf2c7c5208a..a7b221b59178 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -302,6 +302,11 @@ static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr) p->des2 = cpu_to_le32(addr); } +static void ndesc_clear(struct dma_desc *p) +{ + p->des2 = 0; +} + const struct stmmac_desc_ops ndesc_ops = { .tx_status = ndesc_get_tx_status, .rx_status = ndesc_get_rx_status, @@ -322,4 +327,5 @@ const struct stmmac_desc_ops ndesc_ops = { .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, .display_ring = ndesc_display_ring, .set_addr = ndesc_set_addr, + .clear = ndesc_clear, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3f559d72b41f..0ccee6a779f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1341,14 +1341,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev) else p = tx_q->dma_tx + i; - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - p->des0 = 0; - p->des1 = 0; - p->des2 = 0; - p->des3 = 0; - } else { - p->des2 = 0; - } + stmmac_clear_desc(priv, p); tx_q->tx_skbuff_dma[i].buf = 0; tx_q->tx_skbuff_dma[i].map_as_page = false; -- cgit v1.2.3 From ab0204e35c2b4462d50f3d4e8df0c965e4f0688a Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:02 +0100 Subject: net: stmmac: Uniformize the use of dma_{rx/tx}_mode callbacks Instead of relying on the GMAC version for choosing if we need to use dma_{rx/tx}_mode or just dma_mode callback lets uniformize this and always use the dma_{rx/tx}_mode callbacks. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
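A stand-alone sketch of the threshold-ladder encoding that the split rx/tx callbacks below perform; with one callback per direction, each only touches its own mode field. The register bits and cut-off values here are illustrative placeholders, not the real EMAC/DMA definitions.

#include <stdio.h>

#define SF_DMA_MODE	(-1)		/* store-and-forward sentinel (illustrative) */

#define TX_SF_BIT	(1u << 0)	/* store-and-forward enable */
#define TX_TH_MASK	(3u << 1)	/* threshold field */
#define TX_TH_64	(0u << 1)
#define TX_TH_128	(1u << 1)
#define TX_TH_192	(2u << 1)
#define TX_TH_256	(3u << 1)

/* encode a tx threshold the way a per-direction callback would */
static unsigned int encode_tx_mode(unsigned int reg, int mode)
{
	if (mode == SF_DMA_MODE)
		return reg | TX_SF_BIT;

	reg &= ~(TX_SF_BIT | TX_TH_MASK);
	if (mode < 64)
		reg |= TX_TH_64;
	else if (mode < 128)
		reg |= TX_TH_128;
	else if (mode < 192)
		reg |= TX_TH_192;
	else
		reg |= TX_TH_256;
	return reg;
}

int main(void)
{
	printf("mode 32       -> 0x%x\n", encode_tx_mode(0, 32));
	printf("mode 200      -> 0x%x\n", encode_tx_mode(0, 200));
	printf("store-forward -> 0x%x\n", encode_tx_mode(0, SF_DMA_MODE));
	return 0;
}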
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 57 ++++++++++-------- .../net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 67 ++++++++++++---------- drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 10 ++-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 6 -- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 32 ++++------- 5 files changed, 86 insertions(+), 86 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 2f7f0915f071..11c287a7ed52 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -437,13 +437,36 @@ static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr, return ret; } -static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode, - int rxmode, int rxfifosz) +static void sun8i_dwmac_dma_operation_mode_rx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 v; + + v = readl(ioaddr + EMAC_RX_CTL1); + if (mode == SF_DMA_MODE) { + v |= EMAC_RX_MD; + } else { + v &= ~EMAC_RX_MD; + v &= ~EMAC_RX_TH_MASK; + if (mode < 32) + v |= EMAC_RX_TH_32; + else if (mode < 64) + v |= EMAC_RX_TH_64; + else if (mode < 96) + v |= EMAC_RX_TH_96; + else if (mode < 128) + v |= EMAC_RX_TH_128; + } + writel(v, ioaddr + EMAC_RX_CTL1); +} + +static void sun8i_dwmac_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) { u32 v; v = readl(ioaddr + EMAC_TX_CTL1); - if (txmode == SF_DMA_MODE) { + if (mode == SF_DMA_MODE) { v |= EMAC_TX_MD; /* Undocumented bit (called TX_NEXT_FRM in BSP), the original * comment is @@ -454,40 +477,24 @@ static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode, } else { v &= ~EMAC_TX_MD; v &= ~EMAC_TX_TH_MASK; - if (txmode < 64) + if (mode < 64) v |= EMAC_TX_TH_64; - else if (txmode < 128) + else if (mode < 128) v |= EMAC_TX_TH_128; - else if (txmode < 192) + else if (mode < 192) v |= EMAC_TX_TH_192; - else if (txmode < 256) + else if (mode < 256) v |= EMAC_TX_TH_256; } writel(v, ioaddr + EMAC_TX_CTL1); - - v = readl(ioaddr + EMAC_RX_CTL1); - if (rxmode == SF_DMA_MODE) { - v |= EMAC_RX_MD; - } else { - v &= ~EMAC_RX_MD; - v &= ~EMAC_RX_TH_MASK; - if (rxmode < 32) - v |= EMAC_RX_TH_32; - else if (rxmode < 64) - v |= EMAC_RX_TH_64; - else if (rxmode < 96) - v |= EMAC_RX_TH_96; - else if (rxmode < 128) - v |= EMAC_RX_TH_128; - } - writel(v, ioaddr + EMAC_RX_CTL1); } static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = { .reset = sun8i_dwmac_dma_reset, .init = sun8i_dwmac_dma_init, .dump_regs = sun8i_dwmac_dump_regs, - .dma_mode = sun8i_dwmac_dma_operation_mode, + .dma_rx_mode = sun8i_dwmac_dma_operation_mode_rx, + .dma_tx_mode = sun8i_dwmac_dma_operation_mode_tx, .enable_dma_transmission = sun8i_dwmac_enable_dma_transmission, .enable_dma_irq = sun8i_dwmac_enable_dma_irq, .disable_dma_irq = sun8i_dwmac_disable_dma_irq, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 7ecf549c7f1c..d7447b05f7a5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -148,12 +148,40 @@ static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) return csr6; } -static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, - int rxmode, int rxfifosz) +static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) { 
u32 csr6 = readl(ioaddr + DMA_CONTROL); - if (txmode == SF_DMA_MODE) { + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable RX store and forward mode\n"); + csr6 |= DMA_CONTROL_RSF; + } else { + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); + csr6 &= ~DMA_CONTROL_RSF; + csr6 &= DMA_CONTROL_TC_RX_MASK; + if (mode <= 32) + csr6 |= DMA_CONTROL_RTC_32; + else if (mode <= 64) + csr6 |= DMA_CONTROL_RTC_64; + else if (mode <= 96) + csr6 |= DMA_CONTROL_RTC_96; + else + csr6 |= DMA_CONTROL_RTC_128; + } + + /* Configure flow control based on rx fifo size */ + csr6 = dwmac1000_configure_fc(csr6, fifosz); + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable TX store and forward mode\n"); /* Transmit COE type 2 cannot be done in cut-through mode. */ csr6 |= DMA_CONTROL_TSF; @@ -162,42 +190,22 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, */ csr6 |= DMA_CONTROL_OSF; } else { - pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); csr6 &= ~DMA_CONTROL_TSF; csr6 &= DMA_CONTROL_TC_TX_MASK; /* Set the transmit threshold */ - if (txmode <= 32) + if (mode <= 32) csr6 |= DMA_CONTROL_TTC_32; - else if (txmode <= 64) + else if (mode <= 64) csr6 |= DMA_CONTROL_TTC_64; - else if (txmode <= 128) + else if (mode <= 128) csr6 |= DMA_CONTROL_TTC_128; - else if (txmode <= 192) + else if (mode <= 192) csr6 |= DMA_CONTROL_TTC_192; else csr6 |= DMA_CONTROL_TTC_256; } - if (rxmode == SF_DMA_MODE) { - pr_debug("GMAC: enable RX store and forward mode\n"); - csr6 |= DMA_CONTROL_RSF; - } else { - pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode); - csr6 &= ~DMA_CONTROL_RSF; - csr6 &= DMA_CONTROL_TC_RX_MASK; - if (rxmode <= 32) - csr6 |= DMA_CONTROL_RTC_32; - else if (rxmode <= 64) - csr6 |= DMA_CONTROL_RTC_64; - else if (rxmode <= 96) - csr6 |= DMA_CONTROL_RTC_96; - else - csr6 |= DMA_CONTROL_RTC_128; - } - - /* Configure flow control based on rx fifo size */ - csr6 = dwmac1000_configure_fc(csr6, rxfifosz); - writel(csr6, ioaddr + DMA_CONTROL); } @@ -258,7 +266,8 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = { .init = dwmac1000_dma_init, .axi = dwmac1000_dma_axi, .dump_regs = dwmac1000_dump_dma_regs, - .dma_mode = dwmac1000_dma_operation_mode, + .dma_rx_mode = dwmac1000_dma_operation_mode_rx, + .dma_tx_mode = dwmac1000_dma_operation_mode_tx, .enable_dma_transmission = dwmac_enable_dma_transmission, .enable_dma_irq = dwmac_enable_dma_irq, .disable_dma_irq = dwmac_disable_dma_irq, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 6502b9aa3bf5..80339d3d71c7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -51,14 +51,14 @@ static void dwmac100_dma_init(void __iomem *ioaddr, * The transmit threshold can be programmed by setting the TTC bits in the DMA * control register. 
*/ -static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, - int rxmode, int rxfifosz) +static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) { u32 csr6 = readl(ioaddr + DMA_CONTROL); - if (txmode <= 32) + if (mode <= 32) csr6 |= DMA_CONTROL_TTC_32; - else if (txmode <= 64) + else if (mode <= 64) csr6 |= DMA_CONTROL_TTC_64; else csr6 |= DMA_CONTROL_TTC_128; @@ -113,7 +113,7 @@ const struct stmmac_dma_ops dwmac100_dma_ops = { .reset = dwmac_dma_reset, .init = dwmac100_dma_init, .dump_regs = dwmac100_dump_dma_regs, - .dma_mode = dwmac100_dma_operation_mode, + .dma_tx_mode = dwmac100_dma_operation_mode_tx, .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr, .enable_dma_transmission = dwmac_enable_dma_transmission, .enable_dma_irq = dwmac_enable_dma_irq, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index a6b9c9790d11..3ff4afe5e452 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -153,10 +153,6 @@ struct stmmac_dma_ops { void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); /* Dump DMA registers */ void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); - /* Set tx/rx threshold in the csr6 register - * An invalid value enables the store-and-forward mode */ - void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, - int rxfifosz); void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode); void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, @@ -199,8 +195,6 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, axi, __args) #define stmmac_dump_dma_regs(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, dump_regs, __args) -#define stmmac_dma_mode(__priv, __args...) \ - stmmac_do_void_callback(__priv, dma, dma_mode, __args) #define stmmac_dma_rx_mode(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args) #define stmmac_dma_tx_mode(__priv, __args...) 
\ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0ccee6a779f9..beb7ec12dfa6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1787,22 +1787,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) } /* configure all channels */ - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - for (chan = 0; chan < rx_channels_count; chan++) { - qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; + for (chan = 0; chan < rx_channels_count; chan++) { + qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; - stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, - rxfifosz, qmode); - } + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, + rxfifosz, qmode); + } - for (chan = 0; chan < tx_channels_count; chan++) { - qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; + for (chan = 0; chan < tx_channels_count; chan++) { + qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; - stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, - txfifosz, qmode); - } - } else { - stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz); + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, + txfifosz, qmode); } } @@ -1971,14 +1967,8 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, rxfifosz /= rx_channels_count; txfifosz /= tx_channels_count; - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, - rxqmode); - stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, - txqmode); - } else { - stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz); - } + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); } static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) -- cgit v1.2.3 From 63a550fc151c5b20bb6d7c7c21dca8e56a67620f Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:03 +0100 Subject: net: stmmac: Remove uneeded checks for GMAC version With the introducion of callbacks check in hwif.h we only call the callback if HW supports it so there is no longer need to check for GMAC version. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
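A rough sketch of the guard this patch relies on: hooks that a given core does not provide are simply left NULL and the wrapper no-ops, so callers stop testing synopsys_id. The macro below only shows the shape of the idea; it is not the actual stmmac_do_void_callback definition from hwif.h, and the hook name is made up.

#include <stdio.h>

struct ops {
	/* hook that only newer cores provide (name is illustrative) */
	void (*init_extra_feature)(int arg);
};

#define do_void_callback(o, fn, ...) \
	do { if ((o)->fn) (o)->fn(__VA_ARGS__); } while (0)

static void new_core_init_extra_feature(int arg)
{
	printf("extra feature configured with arg %d\n", arg);
}

int main(void)
{
	struct ops old_core = { .init_extra_feature = NULL };
	struct ops new_core = { .init_extra_feature = new_core_init_extra_feature };

	/* callers no longer test the core version; an absent hook does nothing */
	do_void_callback(&old_core, init_extra_feature, 4);
	do_void_callback(&new_core, init_extra_feature, 4);
	return 0;
}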
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index beb7ec12dfa6..ce6f839ccbea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1973,11 +1973,8 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) { - int ret = false; + int ret; - /* Safety features are only available in cores >= 5.10 */ - if (priv->synopsys_id < DWMAC_CORE_5_10) - return ret; ret = stmmac_safety_feat_irq_status(priv, priv->dev, priv->ioaddr, priv->dma_cap.asp, &priv->sstats); if (ret && (ret != -EINVAL)) { @@ -2495,12 +2492,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_core_init(priv, priv->hw, dev); /* Initialize MTL*/ - if (priv->synopsys_id >= DWMAC_CORE_4_00) - stmmac_mtl_configuration(priv); + stmmac_mtl_configuration(priv); /* Initialize Safety Features */ - if (priv->synopsys_id >= DWMAC_CORE_5_10) - stmmac_safety_feat_configuration(priv); + stmmac_safety_feat_configuration(priv); ret = stmmac_rx_ipc(priv, priv->hw); if (!ret) { @@ -3054,10 +3049,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (enh_desc) is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); - if (unlikely(is_jumbo) && likely(priv->synopsys_id < - DWMAC_CORE_4_00)) { + if (unlikely(is_jumbo)) { entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); - if (unlikely(entry < 0)) + if (unlikely(entry < 0) && (entry != -EINVAL)) goto dma_map_err; } -- cgit v1.2.3 From 758d5c73e2a3e9d06708112e1034b4fa3e9df53f Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:04 +0100 Subject: net: stmmac: Move PTP and MMC base address calculation to hwif.c PTP and MMC modules base address can depend on the GMAC version. As this is HW specific lets move this base address calculation to hwif.c. Also, add an entry in the HW table so that we can specify the module offset. This can later be extended to more modules, if deemed necessary. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
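A stand-alone sketch of the per-core offset table described above: each HW table entry carries the module offsets, and the base addresses fall out of the matched entry once, at setup time. The offset values below are made-up placeholders, not the real PTP_GMAC*_OFFSET / MMC_GMAC*_OFFSET constants.

#include <stdio.h>
#include <stdbool.h>

struct regs_off {
	unsigned long ptp_off;
	unsigned long mmc_off;
};

struct hwif_entry {
	bool gmac4;
	struct regs_off regs;
};

/* offsets are made-up placeholders, not the real GMAC register maps */
static const struct hwif_entry table[] = {
	{ .gmac4 = false, .regs = { .ptp_off = 0x700, .mmc_off = 0x100 } },
	{ .gmac4 = true,  .regs = { .ptp_off = 0xb00, .mmc_off = 0x700 } },
};

int main(void)
{
	unsigned long ioaddr = 0x40000000UL;	/* pretend mapped base */
	bool wants_gmac4 = true;
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].gmac4 != wants_gmac4)
			continue;
		/* base addresses come from the matched entry, no version ifs */
		printf("ptpaddr = 0x%lx\n", ioaddr + table[i].regs.ptp_off);
		printf("mmcaddr = 0x%lx\n", ioaddr + table[i].regs.mmc_off);
		break;
	}
	return 0;
}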
Miller --- drivers/net/ethernet/stmicro/stmmac/hwif.c | 34 +++++++++++++++++++++++ drivers/net/ethernet/stmicro/stmmac/hwif.h | 5 ++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8 ------ 3 files changed, 39 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 9acc8d2f1039..23a12649e247 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -6,6 +6,7 @@ #include "common.h" #include "stmmac.h" +#include "stmmac_ptp.h" static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg) { @@ -72,6 +73,7 @@ static const struct stmmac_hwif_entry { bool gmac; bool gmac4; u32 min_id; + const struct stmmac_regs_off regs; const void *desc; const void *dma; const void *mac; @@ -86,6 +88,10 @@ static const struct stmmac_hwif_entry { .gmac = false, .gmac4 = false, .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, + .mmc_off = MMC_GMAC3_X_OFFSET, + }, .desc = NULL, .dma = &dwmac100_dma_ops, .mac = &dwmac100_ops, @@ -98,6 +104,10 @@ static const struct stmmac_hwif_entry { .gmac = true, .gmac4 = false, .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, + .mmc_off = MMC_GMAC3_X_OFFSET, + }, .desc = NULL, .dma = &dwmac1000_dma_ops, .mac = &dwmac1000_ops, @@ -110,6 +120,10 @@ static const struct stmmac_hwif_entry { .gmac = false, .gmac4 = true, .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, .desc = &dwmac4_desc_ops, .dma = &dwmac4_dma_ops, .mac = &dwmac4_ops, @@ -122,6 +136,10 @@ static const struct stmmac_hwif_entry { .gmac = false, .gmac4 = true, .min_id = DWMAC_CORE_4_00, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, .desc = &dwmac4_desc_ops, .dma = &dwmac4_dma_ops, .mac = &dwmac410_ops, @@ -134,6 +152,10 @@ static const struct stmmac_hwif_entry { .gmac = false, .gmac4 = true, .min_id = DWMAC_CORE_4_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, .desc = &dwmac4_desc_ops, .dma = &dwmac410_dma_ops, .mac = &dwmac410_ops, @@ -146,6 +168,10 @@ static const struct stmmac_hwif_entry { .gmac = false, .gmac4 = true, .min_id = DWMAC_CORE_5_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, .desc = &dwmac4_desc_ops, .dma = &dwmac410_dma_ops, .mac = &dwmac510_ops, @@ -175,6 +201,12 @@ int stmmac_hwif_init(struct stmmac_priv *priv) /* Save ID for later use */ priv->synopsys_id = id; + /* Lets assume some safe values first */ + priv->ptpaddr = priv->ioaddr + + (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET); + priv->mmcaddr = priv->ioaddr + + (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET); + /* Check for HW specific setup first */ if (priv->plat->setup) { priv->hw = priv->plat->setup(priv); @@ -206,6 +238,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv) mac->tc = entry->tc; priv->hw = mac; + priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; + priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; /* Entry found */ ret = entry->setup(priv); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 3ff4afe5e452..06fb20b27abf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -442,6 +442,11 @@ struct stmmac_tc_ops { #define stmmac_tc_setup_cls_u32(__priv, __args...) 
\ stmmac_do_callback(__priv, tc, setup_cls_u32, __args) +struct stmmac_regs_off { + u32 ptp_off; + u32 mmc_off; +}; + extern const struct stmmac_ops dwmac100_ops; extern const struct stmmac_dma_ops dwmac100_dma_ops; extern const struct stmmac_ops dwmac1000_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ce6f839ccbea..a4d6ea7eb051 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2085,14 +2085,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; - priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; - } else { - priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; - priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; - } - dwmac_mmc_intr_all_mask(priv->mmcaddr); if (priv->dma_cap.rmon) { -- cgit v1.2.3 From 24aaed0cc0953fce632d539a148025f0e4a82bcf Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:05 +0100 Subject: net: stmmac: Uniformize the use of dma_init_* callbacks Instead of relying on the GMAC version for choosing if we need to use dma_init or dma_init_{rx/tx}_chan callback, lets uniformize this and always use the dma_init_{rx/tx}_chan callbacks. While at it, fix the use of dma_init_chan callback, which shall be called for as many channels as the max of rx/tx channels. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 25 ++++++-- .../net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 25 +++++--- drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 25 +++++--- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 3 +- drivers/net/ethernet/stmicro/stmmac/hwif.h | 2 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 66 +++++++++------------- 6 files changed, 85 insertions(+), 61 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 11c287a7ed52..2e6e2a96b4f2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -276,17 +276,28 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr) * Called from stmmac via stmmac_dma_ops->init */ static void sun8i_dwmac_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds) + struct stmmac_dma_cfg *dma_cfg, int atds) { - /* Write TX and RX descriptors address */ - writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST); - writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST); - writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN); writel(0x1FFFFFF, ioaddr + EMAC_INT_STA); } +static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan) +{ + /* Write RX descriptors address */ + writel(dma_rx_phy, ioaddr + EMAC_RX_DESC_LIST); +} + +static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan) +{ + /* Write TX descriptors address */ + writel(dma_tx_phy, ioaddr + EMAC_TX_DESC_LIST); +} + /* sun8i_dwmac_dump_regs() - Dump EMAC address space * Called from stmmac_dma_ops->dump_regs * Used for ethtool @@ -492,6 +503,8 
@@ static void sun8i_dwmac_dma_operation_mode_tx(void __iomem *ioaddr, int mode, static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = { .reset = sun8i_dwmac_dma_reset, .init = sun8i_dwmac_dma_init, + .init_rx_chan = sun8i_dwmac_dma_init_rx, + .init_tx_chan = sun8i_dwmac_dma_init_tx, .dump_regs = sun8i_dwmac_dump_regs, .dma_rx_mode = sun8i_dwmac_dma_operation_mode_rx, .dma_tx_mode = sun8i_dwmac_dma_operation_mode_tx, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index d7447b05f7a5..aacc4aa80e3c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -81,8 +81,7 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) } static void dwmac1000_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds) + struct stmmac_dma_cfg *dma_cfg, int atds) { u32 value = readl(ioaddr + DMA_BUS_MODE); int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; @@ -119,12 +118,22 @@ static void dwmac1000_dma_init(void __iomem *ioaddr, /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); +} - /* RX/TX descriptor base address lists must be written into - * DMA CSR3 and CSR4, respectively - */ - writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); - writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); +static void dwmac1000_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan) +{ + /* RX descriptor base address list must be written into DMA CSR3 */ + writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR); +} + +static void dwmac1000_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan) +{ + /* TX descriptor base address list must be written into DMA CSR4 */ + writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR); } static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) @@ -264,6 +273,8 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, const struct stmmac_dma_ops dwmac1000_dma_ops = { .reset = dwmac_dma_reset, .init = dwmac1000_dma_init, + .init_rx_chan = dwmac1000_dma_init_rx, + .init_tx_chan = dwmac1000_dma_init_tx, .axi = dwmac1000_dma_axi, .dump_regs = dwmac1000_dump_dma_regs, .dma_rx_mode = dwmac1000_dma_operation_mode_rx, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 80339d3d71c7..21dee25ee570 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -29,8 +29,7 @@ #include "dwmac_dma.h" static void dwmac100_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds) + struct stmmac_dma_cfg *dma_cfg, int atds) { /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT), @@ -38,12 +37,22 @@ static void dwmac100_dma_init(void __iomem *ioaddr, /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); +} - /* RX/TX descriptor base addr lists must be written into - * DMA CSR3 and CSR4, respectively - */ - writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); - writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); +static void dwmac100_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan) +{ + /* RX descriptor base addr lists must be written into DMA CSR3 */ + writel(dma_rx_phy, ioaddr + 
DMA_RCV_BASE_ADDR); +} + +static void dwmac100_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan) +{ + /* TX descriptor base addr lists must be written into DMA CSR4 */ + writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR); } /* Store and Forward capability is not used at all. @@ -112,6 +121,8 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, const struct stmmac_dma_ops dwmac100_dma_ops = { .reset = dwmac_dma_reset, .init = dwmac100_dma_init, + .init_rx_chan = dwmac100_dma_init_rx, + .init_tx_chan = dwmac100_dma_init_tx, .dump_regs = dwmac100_dump_dma_regs, .dma_tx_mode = dwmac100_dma_operation_mode_tx, .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 9aab5b35e2d8..bf8e5a16f11c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -120,8 +120,7 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr, } static void dwmac4_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds) + struct stmmac_dma_cfg *dma_cfg, int atds) { u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 06fb20b27abf..1c674d61060c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -140,7 +140,7 @@ struct stmmac_dma_ops { /* DMA core initialization */ int (*reset)(void __iomem *ioaddr); void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds); + int atds); void (*init_chan)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, u32 chan); void (*init_rx_chan)(void __iomem *ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a4d6ea7eb051..34c1fcc23fb6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2138,10 +2138,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); struct stmmac_rx_queue *rx_q; struct stmmac_tx_queue *tx_q; - u32 dummy_dma_rx_phy = 0; - u32 dummy_dma_tx_phy = 0; u32 chan = 0; int atds = 0; int ret = 0; @@ -2160,48 +2159,39 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) return ret; } - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - /* DMA Configuration */ - stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, - dummy_dma_tx_phy, dummy_dma_rx_phy, atds); - - /* DMA RX Channel Configuration */ - for (chan = 0; chan < rx_channels_count; chan++) { - rx_q = &priv->rx_queue[chan]; - - stmmac_init_rx_chan(priv, priv->ioaddr, - priv->plat->dma_cfg, rx_q->dma_rx_phy, - chan); - - rx_q->rx_tail_addr = rx_q->dma_rx_phy + - (DMA_RX_SIZE * sizeof(struct dma_desc)); - stmmac_set_rx_tail_ptr(priv, priv->ioaddr, - rx_q->rx_tail_addr, chan); - } - - /* DMA TX Channel Configuration */ - for (chan = 0; chan < tx_channels_count; chan++) { - tx_q = &priv->tx_queue[chan]; + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_channels_count; chan++) { + rx_q = &priv->rx_queue[chan]; - stmmac_init_chan(priv, priv->ioaddr, - priv->plat->dma_cfg, chan); + 
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, chan); - stmmac_init_tx_chan(priv, priv->ioaddr, - priv->plat->dma_cfg, tx_q->dma_tx_phy, - chan); + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (DMA_RX_SIZE * sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); + } - tx_q->tx_tail_addr = tx_q->dma_tx_phy + - (DMA_TX_SIZE * sizeof(struct dma_desc)); - stmmac_set_tx_tail_ptr(priv, priv->ioaddr, - tx_q->tx_tail_addr, chan); - } - } else { - rx_q = &priv->rx_queue[chan]; + /* DMA TX Channel Configuration */ + for (chan = 0; chan < tx_channels_count; chan++) { tx_q = &priv->tx_queue[chan]; - stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, - tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy + + (DMA_TX_SIZE * sizeof(struct dma_desc)); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, chan); } + /* DMA CSR Channel configuration */ + for (chan = 0; chan < dma_csr_ch; chan++) + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + + /* DMA Configuration */ + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); + if (priv->plat->axi) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); -- cgit v1.2.3 From f1565c60210f049e068549bfc2a945331e0ee288 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:06 +0100 Subject: net: stmmac: Remove uneeded check for GMAC version in stmmac_xmit We either have .enable_dma_transmission or .set_tx_tail_ptr in the HW table callbacks, we can never have both so there is no need to check for GMAC version. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
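Referring back to the dma_init_* rework in the previous patch: a stand-alone sketch of its three channel-setup loops, where the common per-channel CSR init must run over the larger of the rx and tx channel counts. Counts and helpers are illustrative only.

#include <stdio.h>

static void init_rx_chan(unsigned int chan)  { printf("rx  chan %u\n", chan); }
static void init_tx_chan(unsigned int chan)  { printf("tx  chan %u\n", chan); }
static void init_csr_chan(unsigned int chan) { printf("csr chan %u\n", chan); }

int main(void)
{
	unsigned int rx_channels = 2, tx_channels = 4;
	unsigned int dma_csr_ch = rx_channels > tx_channels ? rx_channels : tx_channels;
	unsigned int chan;

	for (chan = 0; chan < rx_channels; chan++)
		init_rx_chan(chan);

	for (chan = 0; chan < tx_channels; chan++)
		init_tx_chan(chan);

	/* the shared CSR setup must cover the larger of the two counts */
	for (chan = 0; chan < dma_csr_ch; chan++)
		init_csr_chan(chan);

	return 0;
}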
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 1 - drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 7 ++----- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index 8474bf961dd0..c63c1fe3f26b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -184,7 +184,6 @@ #define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8 int dwmac4_dma_reset(void __iomem *ioaddr); -void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr); void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan); void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan); void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 34c1fcc23fb6..1e7ded6dd7b3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3166,11 +3166,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - if (priv->synopsys_id < DWMAC_CORE_4_00) - stmmac_enable_dma_transmission(priv, priv->ioaddr); - else - stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, - queue); + stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); return NETDEV_TX_OK; -- cgit v1.2.3 From 357951cdf0dcdcc2bacb00c2e7665217a2fe34bf Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:07 +0100 Subject: net: stmmac: Uniformize set_rx_owner() Currently an if condition is used to select the correct callback to set rx_onwer in descriptor. Lets keep this simple and always use the same callback. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
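A stand-alone sketch of the unified set_rx_owner() signature introduced below: one callback both hands the descriptor back to the DMA and optionally suppresses the per-descriptor completion interrupt when the rx watchdog is in use. Bit positions are illustrative, not the exact RDES3 layout.

#include <stdint.h>
#include <stdio.h>

#define RDES3_OWN			(1u << 31)
#define RDES3_BUF1_VALID		(1u << 24)
#define RDES3_INT_ON_COMPLETION		(1u << 30)

struct dma_desc { uint32_t des0, des1, des2, des3; };

/* GMAC4-style: rebuild des3 in one place, honouring the riwt setting */
static void set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	p->des3 = RDES3_OWN | RDES3_BUF1_VALID;
	if (!disable_rx_ic)
		p->des3 |= RDES3_INT_ON_COMPLETION;
}

int main(void)
{
	struct dma_desc d = { 0, 0, 0, 0 };

	set_rx_owner(&d, 0);	/* per-descriptor completion interrupt on */
	printf("with IC:    des3=0x%08x\n", (unsigned)d.des3);

	set_rx_owner(&d, 1);	/* rx watchdog (riwt) in use, no per-desc IRQ */
	printf("without IC: des3=0x%08x\n", (unsigned)d.des3);
	return 0;
}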
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 12 ++++++------ drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 2 +- drivers/net/ethernet/stmicro/stmmac/hwif.h | 2 +- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 2 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 +---- 5 files changed, 10 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 119a2f93dd55..63f869ce28af 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -189,9 +189,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p) p->des3 |= cpu_to_le32(TDES3_OWN); } -static void dwmac4_set_rx_owner(struct dma_desc *p) +static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic) { - p->des3 |= cpu_to_le32(RDES3_OWN); + p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); + + if (!disable_rx_ic) + p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); } static int dwmac4_get_tx_ls(struct dma_desc *p) @@ -292,10 +295,7 @@ exit: static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { - p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); - - if (!disable_rx_ic) - p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); + dwmac4_set_rx_owner(p, disable_rx_ic); } static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end) diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 17cd26fb2a3d..743a60f6ba2b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -292,7 +292,7 @@ static void enh_desc_set_tx_owner(struct dma_desc *p) p->des0 |= cpu_to_le32(ETDES0_OWN); } -static void enh_desc_set_rx_owner(struct dma_desc *p) +static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic) { p->des0 |= cpu_to_le32(RDES0_OWN); } diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 1c674d61060c..06b5e5b15069 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -59,7 +59,7 @@ struct stmmac_desc_ops { /* Get the buffer size from the descriptor */ int (*get_tx_len)(struct dma_desc *p); /* Handle extra events on specific interrupts hw dependent */ - void (*set_rx_owner)(struct dma_desc *p); + void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic); /* Get the receive frame size */ int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type); /* Return the reception status looking at the RDES1 */ diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index a7b221b59178..2facdb5d1aa0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -168,7 +168,7 @@ static void ndesc_set_tx_owner(struct dma_desc *p) p->des0 |= cpu_to_le32(TDES0_OWN); } -static void ndesc_set_rx_owner(struct dma_desc *p) +static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic) { p->des0 |= cpu_to_le32(RDES0_OWN); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 1e7ded6dd7b3..35ccf3f8a5a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3262,10 +3262,7 @@ static inline void 
stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) } dma_wmb(); - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0); - else - stmmac_set_rx_owner(priv, p); + stmmac_set_rx_owner(priv, p, priv->use_riwt); dma_wmb(); -- cgit v1.2.3 From d2df9ea0ade7ef73710603da97b72394c53a111c Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:08 +0100 Subject: net: stmmac: Let descriptor code get skbuff address Stop using if conditions depending on the GMAC version for getting the descriptor skbuff address and use instead a helper implemented in the descriptor files. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 6 ++++++ drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 6 ++++++ drivers/net/ethernet/stmicro/stmmac/hwif.h | 4 ++++ drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 6 ++++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 +----- 5 files changed, 23 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 63f869ce28af..20299f6f65fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -424,6 +424,11 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss) p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV); } +static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr) +{ + *addr = le32_to_cpu(p->des0); +} + static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr) { p->des0 = cpu_to_le32(addr); @@ -459,6 +464,7 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { .init_tx_desc = dwmac4_rd_init_tx_desc, .display_ring = dwmac4_display_ring, .set_mss = dwmac4_set_mss_ctxt, + .get_addr = dwmac4_get_addr, .set_addr = dwmac4_set_addr, .clear = dwmac4_clear, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 743a60f6ba2b..77914c89d749 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -437,6 +437,11 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx) pr_info("\n"); } +static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr) +{ + *addr = le32_to_cpu(p->des2); +} + static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr) { p->des2 = cpu_to_le32(addr); @@ -467,6 +472,7 @@ const struct stmmac_desc_ops enh_desc_ops = { .get_timestamp = enh_desc_get_timestamp, .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, .display_ring = enh_desc_display_ring, + .get_addr = enh_desc_get_addr, .set_addr = enh_desc_set_addr, .clear = enh_desc_clear, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 06b5e5b15069..f499a7fad6f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -79,6 +79,8 @@ struct stmmac_desc_ops { void (*display_ring)(void *head, unsigned int size, bool rx); /* set MSS via context descriptor */ void (*set_mss)(struct dma_desc *p, unsigned int mss); + /* get descriptor skbuff address */ + void (*get_addr)(struct dma_desc *p, unsigned int *addr); /* set descriptor skbuff address */ void (*set_addr)(struct dma_desc *p, dma_addr_t 
addr); /* clear descriptor */ @@ -127,6 +129,8 @@ struct stmmac_desc_ops { stmmac_do_void_callback(__priv, desc, display_ring, __args) #define stmmac_set_mss(__priv, __args...) \ stmmac_do_void_callback(__priv, desc, set_mss, __args) +#define stmmac_get_desc_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, get_addr, __args) #define stmmac_set_desc_addr(__priv, __args...) \ stmmac_do_void_callback(__priv, desc, set_addr, __args) #define stmmac_clear_desc(__priv, __args...) \ diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 2facdb5d1aa0..de65bb29feba 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -297,6 +297,11 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx) pr_info("\n"); } +static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr) +{ + *addr = le32_to_cpu(p->des2); +} + static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr) { p->des2 = cpu_to_le32(addr); @@ -326,6 +331,7 @@ const struct stmmac_desc_ops ndesc_ops = { .get_timestamp = ndesc_get_timestamp, .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, .display_ring = ndesc_display_ring, + .get_addr = ndesc_get_addr, .set_addr = ndesc_set_addr, .clear = ndesc_clear, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 35ccf3f8a5a7..f2687ec3c774 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3350,11 +3350,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) int frame_len; unsigned int des; - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - des = le32_to_cpu(p->des0); - else - des = le32_to_cpu(p->des2); - + stmmac_get_desc_addr(priv, p, &des); frame_len = stmmac_get_rx_frame_len(priv, p, coe); /* If frame length is greater than skb buffer size -- cgit v1.2.3 From 61fac60a6ab72efb3b1ef7d052724fbeed52fe97 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 14:56:09 +0100 Subject: net: stmmac: Remove if condition by taking advantage of hwif return code We can remove the if condition and check if return code is different than -EINVAL, meaning callback is present. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2687ec3c774..c32de53a00d3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3644,6 +3644,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) /* To handle GMAC own interrupts */ if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); + int mtl_status; if (unlikely(status)) { /* For LPI we need to save the tx status */ @@ -3653,20 +3654,18 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) priv->tx_path_in_lpi_mode = false; } - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - for (queue = 0; queue < queues_count; queue++) { - struct stmmac_rx_queue *rx_q = - &priv->rx_queue[queue]; + for (queue = 0; queue < queues_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - status |= stmmac_host_mtl_irq_status(priv, - priv->hw, queue); + mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw, + queue); + if (mtl_status != -EINVAL) + status |= mtl_status; - if (status & CORE_IRQ_MTL_RX_OVERFLOW) - stmmac_set_rx_tail_ptr(priv, - priv->ioaddr, - rx_q->rx_tail_addr, - queue); - } + if (status & CORE_IRQ_MTL_RX_OVERFLOW) + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, + queue); } /* PCS link status */ -- cgit v1.2.3 From 64a2658b58ab519dc17aa3ec5754eedcbdb68bde Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 17 May 2018 21:23:05 +0200 Subject: net: mscc: Add SPDX identifier ocelot_qsys.h is missing the SPDX identfier, fix that. Signed-off-by: Alexandre Belloni Reviewed-by: Allan W. Nielsen Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot_qsys.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mscc/ocelot_qsys.h b/drivers/net/ethernet/mscc/ocelot_qsys.h index aa7267d5ca77..d8c63aa761be 100644 --- a/drivers/net/ethernet/mscc/ocelot_qsys.h +++ b/drivers/net/ethernet/mscc/ocelot_qsys.h @@ -1,7 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ /* * Microsemi Ocelot Switch driver * - * License: Dual MIT/GPL * Copyright (c) 2017 Microsemi Corporation */ -- cgit v1.2.3 From 8f678036f9780c7bf93241f09cf4f269e090ea94 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 18 May 2018 11:09:22 +0100 Subject: hippi: fix spelling mistake: "Framming" -> "Framing" Trivial fix to spelling mistake in printk message text Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/hippi/rrunner.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 1ab97d99b9ba..f41116488079 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -867,7 +867,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) dev->name); goto drop; case E_FRM_ERR: - printk(KERN_WARNING "%s: Framming Error\n", + printk(KERN_WARNING "%s: Framing Error\n", dev->name); goto drop; case E_FLG_SYN_ERR: -- cgit v1.2.3 From a3302baa2c74f09823bee9a1b8a09838059d9182 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Fri, 18 May 2018 14:34:51 +0200 Subject: net: mvpp2: typo and cosmetic fixes This patch on the Marvell PPv2 driver is only cosmetic. Two typos are removed as well as other cosmetic fixes, such as extra new lines or tabs vs spaces. Suggested-by: Stefan Chulski Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 7ba7e5938c2e..8a071ac79aa4 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -1756,7 +1756,6 @@ static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { - if (!(enable & BIT(i))) continue; @@ -1840,7 +1839,6 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, int ai_off = MVPP2_PRS_SRAM_AI_OFFS; for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { - if (!(mask & BIT(i))) continue; @@ -4936,7 +4934,7 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port) if (port->gop_id == 0) { val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | - MVPP22_XLG_EXT_INT_MASK_GIG); + MVPP22_XLG_EXT_INT_MASK_GIG); writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); } @@ -5470,7 +5468,6 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) MVPP2_AGGR_TXQ_UPDATE_REG, pending); } - /* Check if there are enough free descriptors in aggregated txq. * If not, update the number of occupied descriptors and repeat the check. * @@ -5550,7 +5547,7 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); - /* OK, the descriptor cound has been updated: check again. */ + /* OK, the descriptor could have been updated: check again. */ if (txq_pcpu->reserved_num < num) return -ENOMEM; return 0; @@ -6032,7 +6029,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, /* Calculate base address in prefetch buffer. We reserve 16 descriptors * for each existing TXQ. 
* TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT - * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS + * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS */ desc_per_txq = 16; desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + @@ -6602,8 +6599,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, mvpp2_txdesc_size_set(port, tx_desc, frag->size); buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, - frag->size, - DMA_TO_DEVICE); + frag->size, DMA_TO_DEVICE); if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { mvpp2_txq_desc_put(txq); goto cleanup; -- cgit v1.2.3 From 80a95a80d358ded600a517ea0a93fe86b8f8da84 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 18 May 2018 19:12:53 +0530 Subject: cxgb4: collect SGE PF/VF queue map For T6, collect info on queue mapping to corresponding PF/VF in SGE. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 17 ++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 47 ++++++++++++++++++++++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 3 +- 3 files changed, 65 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 740a18ba4229..c333e25620a7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -62,6 +62,18 @@ struct cudbg_hw_sched { u32 map; }; +#define SGE_QBASE_DATA_REG_NUM 4 + +struct sge_qbase_reg_field { + u32 reg_addr; + u32 reg_data[SGE_QBASE_DATA_REG_NUM]; + /* Max supported PFs */ + u32 pf_data_value[PCIE_FW_MASTER_M + 1][SGE_QBASE_DATA_REG_NUM]; + /* Max supported VFs */ + u32 vf_data_value[T6_VF_M + 1][SGE_QBASE_DATA_REG_NUM]; + u32 vfcount; /* Actual number of max vfs in current configuration */ +}; + struct ireg_field { u32 ireg_addr; u32 ireg_data; @@ -357,6 +369,11 @@ static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = { {0x10cc, 0x10d4, 0x0, 16}, }; +static const u32 t6_sge_qbase_index_array[] = { + /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */ + 0x1250, 0x1240, 0x1244, 0x1248, 0x124c, +}; + static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = { {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */ {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 4feb7eca0acf..0afcfe99bff3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1339,16 +1339,39 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } +static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap, + struct sge_qbase_reg_field *qbase, + u32 func, bool is_pf) +{ + u32 *buff, i; + + if (is_pf) { + buff = qbase->pf_data_value[func]; + } else { + buff = qbase->vf_data_value[func]; + /* In SGE_QBASE_INDEX, + * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256. 
+ */ + func += 8; + } + + t4_write_reg(padap, qbase->reg_addr, func); + for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++) + *buff = t4_read_reg(padap, qbase->reg_data[i]); +} + int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; + struct sge_qbase_reg_field *sge_qbase; struct ireg_buf *ch_sge_dbg; int i, rc; - rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2, + rc = cudbg_get_buff(pdbg_init, dbg_buff, + sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase), &temp_buff); if (rc) return rc; @@ -1370,6 +1393,28 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, sge_pio->ireg_local_offset); ch_sge_dbg++; } + + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { + sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg; + /* 1 addr reg SGE_QBASE_INDEX and 4 data reg + * SGE_QBASE_MAP[0-3] + */ + sge_qbase->reg_addr = t6_sge_qbase_index_array[0]; + for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++) + sge_qbase->reg_data[i] = + t6_sge_qbase_index_array[i + 1]; + + for (i = 0; i <= PCIE_FW_MASTER_M; i++) + cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase, + i, true); + + for (i = 0; i < padap->params.arch.vfcount; i++) + cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase, + i, false); + + sge_qbase->vfcount = padap->params.arch.vfcount; + } + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 085691eb2b95..8d751efcb90e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -214,7 +214,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct ireg_buf) * n; break; case CUDBG_SGE_INDIRECT: - len = sizeof(struct ireg_buf) * 2; + len = sizeof(struct ireg_buf) * 2 + + sizeof(struct sge_qbase_reg_field); break; case CUDBG_ULPRX_LA: len = sizeof(struct cudbg_ulprx_la); -- cgit v1.2.3 From eb38401c779d350e9e31396471ea072fa29aec9b Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 18 May 2018 16:54:38 +0100 Subject: net: stmmac: Populate missing callbacks in HWIF initialization Some HW specific setups, like sun8i, do not populate all the necessary callbacks, which is what HWIF helpers were expecting. Fix this by always trying to get the generic helpers and populate them if they were not previously populated by HW specific setup. Signed-off-by: Jose Abreu Fixes: 5f0456b43140 ("net: stmmac: Implement logic to automatically select HW Interface") Reported-by: Corentin Labbe Tested-by: Corentin Labbe Cc: Corentin Labbe Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/hwif.c | 38 +++++++++++++++++------------- 1 file changed, 22 insertions(+), 16 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 23a12649e247..14770fc8865e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -189,13 +189,16 @@ int stmmac_hwif_init(struct stmmac_priv *priv) bool needs_gmac = priv->plat->has_gmac; const struct stmmac_hwif_entry *entry; struct mac_device_info *mac; + bool needs_setup = true; int i, ret; u32 id; if (needs_gmac) { id = stmmac_get_id(priv, GMAC_VERSION); - } else { + } else if (needs_gmac4) { id = stmmac_get_id(priv, GMAC4_VERSION); + } else { + id = 0; } /* Save ID for later use */ @@ -209,13 +212,12 @@ int stmmac_hwif_init(struct stmmac_priv *priv) /* Check for HW specific setup first */ if (priv->plat->setup) { - priv->hw = priv->plat->setup(priv); - if (!priv->hw) - return -ENOMEM; - return 0; + mac = priv->plat->setup(priv); + needs_setup = false; + } else { + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); } - mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); if (!mac) return -ENOMEM; @@ -227,24 +229,28 @@ int stmmac_hwif_init(struct stmmac_priv *priv) continue; if (needs_gmac4 ^ entry->gmac4) continue; - if (id < entry->min_id) + /* Use synopsys_id var because some setups can override this */ + if (priv->synopsys_id < entry->min_id) continue; - mac->desc = entry->desc; - mac->dma = entry->dma; - mac->mac = entry->mac; - mac->ptp = entry->hwtimestamp; - mac->mode = entry->mode; - mac->tc = entry->tc; + /* Only use generic HW helpers if needed */ + mac->desc = mac->desc ? : entry->desc; + mac->dma = mac->dma ? : entry->dma; + mac->mac = mac->mac ? : entry->mac; + mac->ptp = mac->ptp ? : entry->hwtimestamp; + mac->mode = mac->mode ? : entry->mode; + mac->tc = mac->tc ? : entry->tc; priv->hw = mac; priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; /* Entry found */ - ret = entry->setup(priv); - if (ret) - return ret; + if (needs_setup) { + ret = entry->setup(priv); + if (ret) + return ret; + } /* Run quirks, if needed */ if (entry->quirks) { -- cgit v1.2.3 From 991f5b3651f6bb1cb5034f422e43f489e65f2701 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 18 May 2018 12:12:09 -0700 Subject: nfp: bpf: support logic indirect shifts (BPF_[L|R]SH | BPF_X) For indirect shifts, shift amount is not specified as constant, NFP needs to get the shift amount through the low 5 bits of source A operand in PREV_ALU, therefore extra instructions are needed compared with shifts by constants. Because NFP is 32-bit, so we are using register pair for 64-bit shifts and therefore would need different instruction sequences depending on whether shift amount is less than 32 or not. NFP branch-on-bit-test instruction emitter is added by this patch and is used for efficient runtime check on shift amount. We'd think the shift amount is less than 32 if bit 5 is clear and greater or equal than 32 otherwise. Shift amount is greater than or equal to 64 will result in undefined behavior. This patch also use range info to avoid generating unnecessary runtime code if we are certain shift amount is less than 32 or not. 
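[Illustrative aside, not part of the patch: a rough C model of the 64-bit left-shift split described above, using invented names (struct pair64, shl64_indirect). It only restates the commit's pseudo code: the value sits in a 32-bit register pair, bit 5 of the runtime shift amount selects the "< 32" or ">= 32" sequence, and amounts of 64 or more remain undefined, exactly as stated above. It is plain C, not NFP microcode.]

#include <stdint.h>

struct pair64 { uint32_t lo; uint32_t hi; };	/* 64-bit value in a 32-bit register pair */

static void shl64_indirect(struct pair64 *d, uint32_t amt)
{
	uint32_t n = amt & 0x1f;		/* low 5 bits, what PREV_ALU supplies */

	if (!(amt & 0x20)) {			/* bit 5 clear: shift amount < 32 */
		/* high word first: funnel (hi,lo) right by 32 - n;
		 * the n == 0 guard only avoids the C-undefined 32-bit shift,
		 * the hardware double shift handles that case natively.
		 */
		if (n)
			d->hi = (d->hi << n) | (d->lo >> (32 - n));
		d->lo <<= n;
	} else {				/* bit 5 set: 32 <= shift amount < 64 */
		d->hi = d->lo << n;
		d->lo = 0;
	}
}
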
Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 299 +++++++++++++++++++--- drivers/net/ethernet/netronome/nfp/bpf/main.h | 28 ++ drivers/net/ethernet/netronome/nfp/bpf/offload.c | 2 + drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 8 + drivers/net/ethernet/netronome/nfp/nfp_asm.h | 17 +- 5 files changed, 322 insertions(+), 32 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index a4d3da215863..4cff08771951 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -211,6 +211,60 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer) emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL); } +static void +__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer, + bool set, bool src_lmextn) +{ + u16 addr_lo, addr_hi; + u64 insn; + + addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO)); + addr_hi = addr != addr_lo; + + insn = OP_BR_BIT_BASE | + FIELD_PREP(OP_BR_BIT_A_SRC, areg) | + FIELD_PREP(OP_BR_BIT_B_SRC, breg) | + FIELD_PREP(OP_BR_BIT_BV, set) | + FIELD_PREP(OP_BR_BIT_DEFBR, defer) | + FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) | + FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) | + FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn); + + nfp_prog_push(nfp_prog, insn); +} + +static void +emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, + u8 defer, bool set, enum nfp_relo_type relo) +{ + struct nfp_insn_re_regs reg; + int err; + + /* NOTE: The bit to test is specified as an rotation amount, such that + * the bit to test will be placed on the MSB of the result when + * doing a rotate right. For bit X, we need right rotate X + 1. 
+ */ + bit += 1; + + err = swreg_to_restricted(reg_none(), src, reg_imm(bit), ®, false); + if (err) { + nfp_prog->error = err; + return; + } + + __emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set, + reg.src_lmextn); + + nfp_prog->prog[nfp_prog->prog_len - 1] |= + FIELD_PREP(OP_RELO_TYPE, relo); +} + +static void +emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer) +{ + emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL); +} + static void __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, enum immed_width width, bool invert, @@ -309,6 +363,19 @@ emit_shf(struct nfp_prog *nfp_prog, swreg dst, reg.dst_lmextn, reg.src_lmextn); } +static void +emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst, + swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc) +{ + if (sc == SHF_SC_R_ROT) { + pr_err("indirect shift is not allowed on rotation\n"); + nfp_prog->error = -EFAULT; + return; + } + + emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0); +} + static void __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both, @@ -1629,56 +1696,226 @@ static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } -static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - const struct bpf_insn *insn = &meta->insn; - u8 dst = insn->dst_reg * 2; - - if (insn->imm < 32) { - emit_shf(nfp_prog, reg_both(dst + 1), - reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), - SHF_SC_R_DSHF, 32 - insn->imm); - emit_shf(nfp_prog, reg_both(dst), - reg_none(), SHF_OP_NONE, reg_b(dst), - SHF_SC_L_SHF, insn->imm); - } else if (insn->imm == 32) { +/* Pseudo code: + * if shift_amt >= 32 + * dst_high = dst_low << shift_amt[4:0] + * dst_low = 0; + * else + * dst_high = (dst_high, dst_low) >> (32 - shift_amt) + * dst_low = dst_low << shift_amt + * + * The indirect shift will use the same logic at runtime. 
+ */ +static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +{ + if (shift_amt < 32) { + emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), + SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, + 32 - shift_amt); + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_L_SHF, shift_amt); + } else if (shift_amt == 32) { wrp_reg_mov(nfp_prog, dst + 1, dst); wrp_immed(nfp_prog, reg_both(dst), 0); - } else if (insn->imm > 32) { - emit_shf(nfp_prog, reg_both(dst + 1), - reg_none(), SHF_OP_NONE, reg_b(dst), - SHF_SC_L_SHF, insn->imm - 32); + } else if (shift_amt > 32) { + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_L_SHF, shift_amt - 32); wrp_immed(nfp_prog, reg_both(dst), 0); } return 0; } -static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u8 dst = insn->dst_reg * 2; - if (insn->imm < 32) { - emit_shf(nfp_prog, reg_both(dst), - reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), - SHF_SC_R_DSHF, insn->imm); - emit_shf(nfp_prog, reg_both(dst + 1), - reg_none(), SHF_OP_NONE, reg_b(dst + 1), - SHF_SC_R_SHF, insn->imm); - } else if (insn->imm == 32) { + return __shl_imm64(nfp_prog, dst, insn->imm); +} + +static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB, + reg_b(src)); + emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_DSHF); +} + +/* NOTE: for indirect left shift, HIGH part should be calculated first. */ +static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_L_SHF); +} + +static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + shl_reg64_lt32_high(nfp_prog, dst, src); + shl_reg64_lt32_low(nfp_prog, dst, src); +} + +static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_L_SHF); + wrp_immed(nfp_prog, reg_both(dst), 0); +} + +static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u64 umin, umax; + u8 dst, src; + + dst = insn->dst_reg * 2; + umin = meta->umin; + umax = meta->umax; + if (umin == umax) + return __shl_imm64(nfp_prog, dst, umin); + + src = insn->src_reg * 2; + if (umax < 32) { + shl_reg64_lt32(nfp_prog, dst, src); + } else if (umin >= 32) { + shl_reg64_ge32(nfp_prog, dst, src); + } else { + /* Generate different instruction sequences depending on runtime + * value of shift amount. + */ + u16 label_ge32, label_end; + + label_ge32 = nfp_prog_current_offset(nfp_prog) + 7; + emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); + + shl_reg64_lt32_high(nfp_prog, dst, src); + label_end = nfp_prog_current_offset(nfp_prog) + 6; + emit_br(nfp_prog, BR_UNC, label_end, 2); + /* shl_reg64_lt32_low packed in delay slot. 
*/ + shl_reg64_lt32_low(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) + return -EINVAL; + shl_reg64_ge32(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) + return -EINVAL; + } + + return 0; +} + +/* Pseudo code: + * if shift_amt >= 32 + * dst_high = 0; + * dst_low = dst_high >> shift_amt[4:0] + * else + * dst_high = dst_high >> shift_amt + * dst_low = (dst_high, dst_low) >> shift_amt + * + * The indirect shift will use the same logic at runtime. + */ +static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +{ + if (shift_amt < 32) { + emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_DSHF, shift_amt); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, + reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); + } else if (shift_amt == 32) { wrp_reg_mov(nfp_prog, dst, dst + 1); wrp_immed(nfp_prog, reg_both(dst + 1), 0); - } else if (insn->imm > 32) { - emit_shf(nfp_prog, reg_both(dst), - reg_none(), SHF_OP_NONE, reg_b(dst + 1), - SHF_SC_R_SHF, insn->imm - 32); + } else if (shift_amt > 32) { + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); wrp_immed(nfp_prog, reg_both(dst + 1), 0); } return 0; } +static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u8 dst = insn->dst_reg * 2; + + return __shr_imm64(nfp_prog, dst, insn->imm); +} + +/* NOTE: for indirect right shift, LOW part should be calculated first. */ +static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, + reg_b(dst + 1), SHF_SC_R_SHF); +} + +static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_DSHF); +} + +static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + shr_reg64_lt32_low(nfp_prog, dst, src); + shr_reg64_lt32_high(nfp_prog, dst, src); +} + +static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst + 1), SHF_SC_R_SHF); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); +} + +static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u64 umin, umax; + u8 dst, src; + + dst = insn->dst_reg * 2; + umin = meta->umin; + umax = meta->umax; + if (umin == umax) + return __shr_imm64(nfp_prog, dst, umin); + + src = insn->src_reg * 2; + if (umax < 32) { + shr_reg64_lt32(nfp_prog, dst, src); + } else if (umin >= 32) { + shr_reg64_ge32(nfp_prog, dst, src); + } else { + /* Generate different instruction sequences depending on runtime + * value of shift amount. + */ + u16 label_ge32, label_end; + + label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; + emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); + shr_reg64_lt32_low(nfp_prog, dst, src); + label_end = nfp_prog_current_offset(nfp_prog) + 6; + emit_br(nfp_prog, BR_UNC, label_end, 2); + /* shr_reg64_lt32_high packed in delay slot. 
*/ + shr_reg64_lt32_high(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) + return -EINVAL; + shr_reg64_ge32(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) + return -EINVAL; + } + + return 0; +} + static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -2501,7 +2738,9 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, [BPF_ALU64 | BPF_NEG] = neg_reg64, + [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, + [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 8b143546ae85..654fe7823e5e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -263,6 +263,8 @@ struct nfp_bpf_reg_state { * @func_id: function id for call instructions * @arg1: arg1 for call instructions * @arg2: arg2 for call instructions + * @umin: copy of core verifier umin_value. + * @umax: copy of core verifier umax_value. * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags @@ -298,6 +300,13 @@ struct nfp_insn_meta { struct bpf_reg_state arg1; struct nfp_bpf_reg_state arg2; }; + /* We are interested in range info for some operands, + * for example, the shift amount. + */ + struct { + u64 umin; + u64 umax; + }; }; unsigned int off; unsigned short n; @@ -375,6 +384,25 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD); } +static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta) +{ + u8 code = meta->insn.code; + bool is_alu, is_shift; + u8 opclass, opcode; + + opclass = BPF_CLASS(code); + is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU; + if (!is_alu) + return false; + + opcode = BPF_OP(code); + is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH; + if (!is_shift) + return false; + + return BPF_SRC(code) == BPF_X; +} + /** * struct nfp_prog - nfp BPF program * @bpf: backpointer to the bpf app priv structure diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 4db0ac1e42a8..7eae4c0266f8 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -190,6 +190,8 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, meta->insn = prog[i]; meta->n = i; + if (is_mbpf_indir_shift(meta)) + meta->umin = U64_MAX; list_add_tail(&meta->l, &nfp_prog->insns); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 844a9be6e55a..4bfeba7b21b2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -551,6 +551,14 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) if (is_mbpf_xadd(meta)) return nfp_bpf_check_xadd(nfp_prog, meta, env); + if (is_mbpf_indir_shift(meta)) { + const struct bpf_reg_state *sreg = + cur_regs(env) + meta->insn.src_reg; + + meta->umin = min(meta->umin, sreg->umin_value); + 
meta->umax = max(meta->umax, sreg->umax_value); + } + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index faa4e131c136..fa826bd9c668 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -72,8 +72,21 @@ #define OP_BR_ADDR_LO 0x007ffc00000ULL #define OP_BR_ADDR_HI 0x10000000000ULL -#define nfp_is_br(_insn) \ - (((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE) +#define OP_BR_BIT_BASE 0x0d000000000ULL +#define OP_BR_BIT_BASE_MASK 0x0f800080300ULL +#define OP_BR_BIT_A_SRC 0x000000000ffULL +#define OP_BR_BIT_B_SRC 0x0000003fc00ULL +#define OP_BR_BIT_BV 0x00000040000ULL +#define OP_BR_BIT_SRC_LMEXTN 0x40000000000ULL +#define OP_BR_BIT_DEFBR OP_BR_DEFBR +#define OP_BR_BIT_ADDR_LO OP_BR_ADDR_LO +#define OP_BR_BIT_ADDR_HI OP_BR_ADDR_HI + +static inline bool nfp_is_br(u64 insn) +{ + return (insn & OP_BR_BASE_MASK) == OP_BR_BASE || + (insn & OP_BR_BIT_BASE_MASK) == OP_BR_BIT_BASE; +} enum br_mask { BR_BEQ = 0x00, -- cgit v1.2.3 From f43d0f17fe9a4bb770ab38b36e2b5150d8c3d6cf Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 18 May 2018 12:12:10 -0700 Subject: nfp: bpf: support arithmetic right shift by constant (BPF_ARSH | BPF_K) Code logic is similar with logic right shift except we also need to set PREV_ALU result properly, the MSB of which is the bit that will be replicated to fill in all the vacant positions. Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 34 ++++++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_asm.h | 1 + 2 files changed, 35 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4cff08771951..f73242c4da2f 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1916,6 +1916,39 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +/* Code logic is the same as __shr_imm64 except ashr requires signedness bit + * told through PREV_ALU result. + */ +static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u8 dst = insn->dst_reg * 2; + + if (insn->imm < 32) { + emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_DSHF, insn->imm); + /* Set signedness bit. */ + emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, + reg_imm(0)); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, insn->imm); + } else if (insn->imm == 32) { + /* NOTE: this also helps setting signedness bit. 
*/ + wrp_reg_mov(nfp_prog, dst, dst + 1); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, 31); + } else if (insn->imm > 32) { + emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, + reg_imm(0)); + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, insn->imm - 32); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, 31); + } + + return 0; +} + static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -2742,6 +2775,7 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, + [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index fa826bd9c668..f6677bc9875a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -174,6 +174,7 @@ enum shf_op { SHF_OP_NONE = 0, SHF_OP_AND = 2, SHF_OP_OR = 5, + SHF_OP_ASHR = 6, }; enum shf_sc { -- cgit v1.2.3 From c217abccaaa5e4eeba4aee26f29cdb57c026afc1 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 18 May 2018 12:12:11 -0700 Subject: nfp: bpf: support arithmetic indirect right shift (BPF_ARSH | BPF_X) Code logic is similar with arithmetic right shift by constant, and NFP get indirect shift amount through source A operand of PREV_ALU. It is possible to fall back to logic right shift if the MSB is known to be zero from range info, however there is no benefit to do this given logic indirect right shift use the same number and cycle of instruction sequence. Suppose the MSB of regX is the bit we want to replicate to fill in all the vacant positions, and regY contains the shift amount, then we could use single instruction to set up both. [alu, --, regY, OR, regX] -- NOTE: the PREV_ALU result doesn't need to write to any destination register. Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 99 +++++++++++++++++++++++++--- 1 file changed, 89 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index f73242c4da2f..8a92088df0d7 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1919,29 +1919,26 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) /* Code logic is the same as __shr_imm64 except ashr requires signedness bit * told through PREV_ALU result. */ -static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) { - const struct bpf_insn *insn = &meta->insn; - u8 dst = insn->dst_reg * 2; - - if (insn->imm < 32) { + if (shift_amt < 32) { emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, - reg_b(dst), SHF_SC_R_DSHF, insn->imm); + reg_b(dst), SHF_SC_R_DSHF, shift_amt); /* Set signedness bit. 
*/ emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, reg_imm(0)); emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, - reg_b(dst + 1), SHF_SC_R_SHF, insn->imm); - } else if (insn->imm == 32) { + reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); + } else if (shift_amt == 32) { /* NOTE: this also helps setting signedness bit. */ wrp_reg_mov(nfp_prog, dst, dst + 1); emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, reg_b(dst + 1), SHF_SC_R_SHF, 31); - } else if (insn->imm > 32) { + } else if (shift_amt > 32) { emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, reg_imm(0)); emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, - reg_b(dst + 1), SHF_SC_R_SHF, insn->imm - 32); + reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, reg_b(dst + 1), SHF_SC_R_SHF, 31); } @@ -1949,6 +1946,87 @@ static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u8 dst = insn->dst_reg * 2; + + return __ashr_imm64(nfp_prog, dst, insn->imm); +} + +static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + /* NOTE: the first insn will set both indirect shift amount (source A) + * and signedness bit (MSB of result). + */ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); + emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF); +} + +static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + /* NOTE: it is the same as logic shift because we don't need to shift in + * signedness bit when the shift amount is less than 32. + */ + return shr_reg64_lt32_low(nfp_prog, dst, src); +} + +static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + ashr_reg64_lt32_low(nfp_prog, dst, src); + ashr_reg64_lt32_high(nfp_prog, dst, src); +} + +static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, 31); +} + +/* Like ashr_imm64, but need to use indirect shift. */ +static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u64 umin, umax; + u8 dst, src; + + dst = insn->dst_reg * 2; + umin = meta->umin; + umax = meta->umax; + if (umin == umax) + return __ashr_imm64(nfp_prog, dst, umin); + + src = insn->src_reg * 2; + if (umax < 32) { + ashr_reg64_lt32(nfp_prog, dst, src); + } else if (umin >= 32) { + ashr_reg64_ge32(nfp_prog, dst, src); + } else { + u16 label_ge32, label_end; + + label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; + emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); + ashr_reg64_lt32_low(nfp_prog, dst, src); + label_end = nfp_prog_current_offset(nfp_prog) + 6; + emit_br(nfp_prog, BR_UNC, label_end, 2); + /* ashr_reg64_lt32_high packed in delay slot. 
*/ + ashr_reg64_lt32_high(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) + return -EINVAL; + ashr_reg64_ge32(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) + return -EINVAL; + } + + return 0; +} + static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -2775,6 +2853,7 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, + [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64, [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, -- cgit v1.2.3 From b9ffcbaf56d3040efee64d3818688083c29b2a44 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 18 May 2018 09:29:00 +0200 Subject: devlink: introduce devlink_port_attrs_set Change existing setter for split port information into more generic attrs setter. Alongside with that, allow to set port number and subport number for split ports. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 7 ++-- drivers/net/ethernet/mellanox/mlxsw/core.h | 3 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 +-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 4 +-- include/net/devlink.h | 20 +++++++---- include/uapi/linux/devlink.h | 3 ++ net/core/devlink.c | 46 ++++++++++++++++++------ 8 files changed, 64 insertions(+), 25 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e13ac3b8dff7..958769689ca2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1714,15 +1714,16 @@ EXPORT_SYMBOL(mlxsw_core_port_fini); void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv, struct net_device *dev, - bool split, u32 split_group) + u32 port_number, bool split, + u32 split_port_subnumber) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; mlxsw_core_port->port_driver_priv = port_driver_priv; - if (split) - devlink_port_split_set(devlink_port, split_group); + devlink_port_attrs_set(devlink_port, port_number, + split, split_port_subnumber); devlink_port_type_eth_set(devlink_port, dev); } EXPORT_SYMBOL(mlxsw_core_port_eth_set); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 092d39399f3c..e65287c15afd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -201,7 +201,8 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port); void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port); void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv, struct net_device *dev, - bool split, u32 split_group); + u32 port_number, bool split, + u32 split_port_subnumber); void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv); void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 94132f6cec61..96f6d8af1fd8 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2927,8 +2927,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, } mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, - mlxsw_sp_port, dev, mlxsw_sp_port->split, - module); + mlxsw_sp_port, dev, module + 1, + mlxsw_sp_port->split, lane / width); mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index a655c5850aa6..551e05067b1e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1149,7 +1149,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, } mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port, - mlxsw_sx_port, dev, false, 0); + mlxsw_sx_port, dev, module + 1, false, 0); mlxsw_sx->ports[local_port] = mlxsw_sx_port; return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index eb0fc614673d..d7a768ff3112 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -175,8 +175,8 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) return ret; devlink_port_type_eth_set(&port->dl_port, port->netdev); - if (eth_port.is_split) - devlink_port_split_set(&port->dl_port, eth_port.label_port); + devlink_port_attrs_set(&port->dl_port, eth_port.label_port, + eth_port.is_split, eth_port.label_subport); devlink = priv_to_devlink(app->pf); diff --git a/include/net/devlink.h b/include/net/devlink.h index 2e4f71e16e95..d6ca92266709 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -35,6 +35,13 @@ struct devlink { char priv[0] __aligned(NETDEV_ALIGN); }; +struct devlink_port_attrs { + bool set; + u32 port_number; /* same value as "split group" */ + bool split; + u32 split_subport_number; +}; + struct devlink_port { struct list_head list; struct devlink *devlink; @@ -43,8 +50,7 @@ struct devlink_port { enum devlink_port_type type; enum devlink_port_type desired_type; void *type_dev; - bool split; - u32 split_group; + struct devlink_port_attrs attrs; }; struct devlink_sb_pool_info { @@ -367,8 +373,9 @@ void devlink_port_type_eth_set(struct devlink_port *devlink_port, void devlink_port_type_ib_set(struct devlink_port *devlink_port, struct ib_device *ibdev); void devlink_port_type_clear(struct devlink_port *devlink_port); -void devlink_port_split_set(struct devlink_port *devlink_port, - u32 split_group); +void devlink_port_attrs_set(struct devlink_port *devlink_port, + u32 port_number, bool split, + u32 split_subport_number); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, @@ -466,8 +473,9 @@ static inline void devlink_port_type_clear(struct devlink_port *devlink_port) { } -static inline void devlink_port_split_set(struct devlink_port *devlink_port, - u32 split_group) +static inline void devlink_port_attrs_set(struct devlink_port *devlink_port, + u32 port_number, bool split, + u32 split_subport_number) { } diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 1df65a4c2044..15b031a5ee7a 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -224,6 +224,9 @@ enum devlink_attr { 
DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, /* u64 */ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */ + DEVLINK_ATTR_PORT_NUMBER, /* u32 */ + DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index ad1317376798..8fde7d2df9b0 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -453,6 +453,25 @@ static void devlink_notify(struct devlink *devlink, enum devlink_command cmd) msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); } +static int devlink_nl_port_attrs_put(struct sk_buff *msg, + struct devlink_port *devlink_port) +{ + struct devlink_port_attrs *attrs = &devlink_port->attrs; + + if (!attrs->set) + return 0; + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER, attrs->port_number)) + return -EMSGSIZE; + if (!attrs->split) + return 0; + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP, attrs->port_number)) + return -EMSGSIZE; + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, + attrs->split_subport_number)) + return -EMSGSIZE; + return 0; +} + static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_port *devlink_port, enum devlink_command cmd, u32 portid, @@ -492,9 +511,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, ibdev->name)) goto nla_put_failure; } - if (devlink_port->split && - nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP, - devlink_port->split_group)) + if (devlink_nl_port_attrs_put(msg, devlink_port)) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -2971,19 +2988,28 @@ void devlink_port_type_clear(struct devlink_port *devlink_port) EXPORT_SYMBOL_GPL(devlink_port_type_clear); /** - * devlink_port_split_set - Set port is split + * devlink_port_attrs_set - Set port attributes * * @devlink_port: devlink port - * @split_group: split group - identifies group split port is part of + * @port_number: number of the port that is facing user, for example + * the front panel port number + * @split: indicates if this is split port + * @split_subport_number: if the port is split, this is the number + * of subport. */ -void devlink_port_split_set(struct devlink_port *devlink_port, - u32 split_group) +void devlink_port_attrs_set(struct devlink_port *devlink_port, + u32 port_number, bool split, + u32 split_subport_number) { - devlink_port->split = true; - devlink_port->split_group = split_group; + struct devlink_port_attrs *attrs = &devlink_port->attrs; + + attrs->set = true; + attrs->port_number = port_number; + attrs->split = split; + attrs->split_subport_number = split_subport_number; devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); } -EXPORT_SYMBOL_GPL(devlink_port_split_set); +EXPORT_SYMBOL_GPL(devlink_port_attrs_set); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, -- cgit v1.2.3 From 5ec1380a21bb6cd2ba89e31c44dfcc150f9ef792 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 18 May 2018 09:29:01 +0200 Subject: devlink: extend attrs_set for setting port flavours Devlink ports can have specific flavour according to the purpose of use. This patch extend attrs_set so the driver can say which flavour port has. Initial flavours are: physical, cpu, dsa User can query this to see right away what is the purpose of each port. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 4 ++-- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 5 +++-- include/net/devlink.h | 3 +++ include/uapi/linux/devlink.h | 11 +++++++++++ net/core/devlink.c | 5 +++++ 5 files changed, 24 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 958769689ca2..a720aa11bcc0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1722,8 +1722,8 @@ void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; mlxsw_core_port->port_driver_priv = port_driver_priv; - devlink_port_attrs_set(devlink_port, port_number, - split, split_port_subnumber); + devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + port_number, split, split_port_subnumber); devlink_port_type_eth_set(devlink_port, dev); } EXPORT_SYMBOL(mlxsw_core_port_eth_set); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index d7a768ff3112..b1e67cf4257a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -175,8 +175,9 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) return ret; devlink_port_type_eth_set(&port->dl_port, port->netdev); - devlink_port_attrs_set(&port->dl_port, eth_port.label_port, - eth_port.is_split, eth_port.label_subport); + devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + eth_port.label_port, eth_port.is_split, + eth_port.label_subport); devlink = priv_to_devlink(app->pf); diff --git a/include/net/devlink.h b/include/net/devlink.h index d6ca92266709..6eae15cf0f57 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -37,6 +37,7 @@ struct devlink { struct devlink_port_attrs { bool set; + enum devlink_port_flavour flavour; u32 port_number; /* same value as "split group" */ bool split; u32 split_subport_number; @@ -374,6 +375,7 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port, struct ib_device *ibdev); void devlink_port_type_clear(struct devlink_port *devlink_port); void devlink_port_attrs_set(struct devlink_port *devlink_port, + enum devlink_port_flavour flavour, u32 port_number, bool split, u32 split_subport_number); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, @@ -474,6 +476,7 @@ static inline void devlink_port_type_clear(struct devlink_port *devlink_port) } static inline void devlink_port_attrs_set(struct devlink_port *devlink_port, + enum devlink_port_flavour flavour, u32 port_number, bool split, u32 split_subport_number) { diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 15b031a5ee7a..75cb5450c851 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -132,6 +132,16 @@ enum devlink_eswitch_encap_mode { DEVLINK_ESWITCH_ENCAP_MODE_BASIC, }; +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL, /* Any kind of a port physically + * facing the user. + */ + DEVLINK_PORT_FLAVOUR_CPU, /* CPU port */ + DEVLINK_PORT_FLAVOUR_DSA, /* Distributed switch architecture + * interconnect port. + */ +}; + enum devlink_attr { /* don't change the order or add anything between, this is ABI! 
*/ DEVLINK_ATTR_UNSPEC, @@ -224,6 +234,7 @@ enum devlink_attr { DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, /* u64 */ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */ + DEVLINK_ATTR_PORT_FLAVOUR, /* u16 */ DEVLINK_ATTR_PORT_NUMBER, /* u32 */ DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 8fde7d2df9b0..af90d237cbc2 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -460,6 +460,8 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, if (!attrs->set) return 0; + if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) + return -EMSGSIZE; if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER, attrs->port_number)) return -EMSGSIZE; if (!attrs->split) @@ -2991,6 +2993,7 @@ EXPORT_SYMBOL_GPL(devlink_port_type_clear); * devlink_port_attrs_set - Set port attributes * * @devlink_port: devlink port + * @flavour: flavour of the port * @port_number: number of the port that is facing user, for example * the front panel port number * @split: indicates if this is split port @@ -2998,12 +3001,14 @@ EXPORT_SYMBOL_GPL(devlink_port_type_clear); * of subport. */ void devlink_port_attrs_set(struct devlink_port *devlink_port, + enum devlink_port_flavour flavour, u32 port_number, bool split, u32 split_subport_number) { struct devlink_port_attrs *attrs = &devlink_port->attrs; attrs->set = true; + attrs->flavour = flavour; attrs->port_number = port_number; attrs->split = split; attrs->split_subport_number = split_subport_number; -- cgit v1.2.3 From ec932fbda7ba6ac10dba59fd20c736be9a6976e4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 18 May 2018 09:29:04 +0200 Subject: mlxsw: use devlink helper to generate physical port name Since devlink knows the info needed to generate the physical port name in a generic way for all devlink users, use the helper to do the job. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 11 +++++++++++ drivers/net/ethernet/mellanox/mlxsw/core.h | 2 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 17 +++-------------- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 9 +++------ 4 files changed, 19 insertions(+), 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index a720aa11bcc0..a38faec45b30 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1763,6 +1763,17 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_port_type_get); +int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core, + u8 local_port, char *name, size_t len) +{ + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + return devlink_port_get_phys_port_name(devlink_port, name, len); +} +EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name); + static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, const char *buf, size_t size) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index e65287c15afd..4eac7fbd07d5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -209,6 +209,8 @@ void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv); enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, u8 local_port); +int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core, + u8 local_port, char *name, size_t len); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); bool mlxsw_core_schedule_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 96f6d8af1fd8..bb252b36994d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1238,21 +1238,10 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, size_t len) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - u8 module = mlxsw_sp_port->mapping.module; - u8 width = mlxsw_sp_port->mapping.width; - u8 lane = mlxsw_sp_port->mapping.lane; - int err; - - if (!mlxsw_sp_port->split) - err = snprintf(name, len, "p%d", module + 1); - else - err = snprintf(name, len, "p%ds%d", module + 1, - lane / width); - if (err >= len) - return -EINVAL; - - return 0; + return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core, + mlxsw_sp_port->local_port, + name, len); } static struct mlxsw_sp_port_mall_tc_entry * diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 551e05067b1e..3922c1cfe5f5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -417,13 +417,10 @@ static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name, size_t len) { struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); - int err; - - err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1); - if (err >= len) - return -EINVAL; - return 0; + return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core, + mlxsw_sx_port->local_port, + name, len); } static const struct net_device_ops 
mlxsw_sx_port_netdev_ops = { -- cgit v1.2.3 From 62c8a069b510d905039abd4097434f190a316941 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Fri, 18 May 2018 09:33:39 +0200 Subject: net: mvpp2: Add missing VLAN tag detection Marvell PPv2 Header Parser sets some bits in the 'result_info' field in each lookup iteration, to identify different packet attributes such as DSA / VLAN tag, protocol infos, etc. This is used in further classification stages in the controller. It's the DSA tag detection entry that is in charge of detecting when there is a single VLAN tag. This commits adds the missing update of the result_info in this case. Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 8a071ac79aa4..6847cd431aa0 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -2128,6 +2128,9 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + /* Set result info bits to 'single vlan' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, + MVPP2_PRS_RI_VLAN_MASK); /* If packet is tagged continue check vid filtering */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); } else { -- cgit v1.2.3 From 230c184679d5517e9770275853cd3456d00d6599 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 18 May 2018 21:30:18 +0300 Subject: sh_eth: add RGMII support The R-Car V3H (AKA R8A77980) GEther controller adds support for the RGMII PHY interface mode as a new value for the RMII_MII register. Based on the original (and large) patch by Vladimir Barinov. Signed-off-by: Vladimir Barinov Signed-off-by: Sergei Shtylyov Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 8dd41e08a6c6..d214ec09d401 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -466,6 +466,9 @@ static void sh_eth_select_mii(struct net_device *ndev) u32 value; switch (mdp->phy_interface) { + case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID: + value = 0x3; + break; case PHY_INTERFACE_MODE_GMII: value = 0x2; break; -- cgit v1.2.3 From 93f0fa75190fefee55408a1c0812fa54f576c61b Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 18 May 2018 21:31:28 +0300 Subject: sh_eth: add EDMR.NBST support The R-Car V3H (AKA R8A77980) GEther controller adds the DMA burst mode bit (NBST) in EDMR and the manual tells to always set it before doing any DMA. Based on the original (and large) patch by Vladimir Barinov. Signed-off-by: Vladimir Barinov Signed-off-by: Sergei Shtylyov Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 4 ++++ drivers/net/ethernet/renesas/sh_eth.h | 2 ++ 2 files changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d214ec09d401..800c196510eb 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1434,6 +1434,10 @@ static int sh_eth_dev_init(struct net_device *ndev) sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); + /* DMA transfer burst mode */ + if (mdp->cd->nbst) + sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST); + if (mdp->cd->bculr) sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a5b792ce2ae7..7d5aaba1384a 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -184,6 +184,7 @@ enum GECMR_BIT { /* EDMR */ enum DMAC_M_BIT { + EDMR_NBST = 0x80, EDMR_EL = 0x40, /* Litte endian */ EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, EDMR_SRST_GETHER = 0x03, @@ -505,6 +506,7 @@ struct sh_eth_cpu_data { unsigned bculr:1; /* EtherC have BCULR */ unsigned tsu:1; /* EtherC have TSU */ unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */ + unsigned nbst:1; /* E-DMAC has NBST bit in EDMR */ unsigned rpadir:1; /* E-DMAC have RPADIR */ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ -- cgit v1.2.3 From 3eb9c2ad1db04913041b78e0b5e543527128b90b Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 18 May 2018 21:32:46 +0300 Subject: sh_eth: add R8A77980 support Finally, add support for the DT probing of the R-Car V3H (AKA R8A77980) -- it's the only R-Car gen3 SoC having the GEther controller -- others have only EtherAVB... Based on the original (and large) patch by Vladimir Barinov. Signed-off-by: Vladimir Barinov Signed-off-by: Sergei Shtylyov Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/sh_eth.txt | 1 + drivers/net/ethernet/renesas/sh_eth.c | 44 ++++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'drivers/net') diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt index 5172799a7f1a..82a4cf2c145d 100644 --- a/Documentation/devicetree/bindings/net/sh_eth.txt +++ b/Documentation/devicetree/bindings/net/sh_eth.txt @@ -14,6 +14,7 @@ Required properties: "renesas,ether-r8a7791" if the device is a part of R8A7791 SoC. "renesas,ether-r8a7793" if the device is a part of R8A7793 SoC. "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC. + "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC. "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC. "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device. 
"renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1 diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 800c196510eb..83148ca61317 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -753,6 +753,49 @@ static struct sh_eth_cpu_data rcar_gen2_data = { .rmiimode = 1, .magic = 1, }; + +/* R8A77980 */ +static struct sh_eth_cpu_data r8a77980_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | + ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | + EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .nbst = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .hw_checksum = 1, + .select_mii = 1, + .magic = 1, + .cexcr = 1, +}; #endif /* CONFIG_OF */ static void sh_eth_set_rate_sh7724(struct net_device *ndev) @@ -3134,6 +3177,7 @@ static const struct of_device_id sh_eth_match_table[] = { { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data }, { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data }, { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, + { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, -- cgit v1.2.3 From 1bc49fd18b02a4942d4bb6f46fd23a5fd61c9fd6 Mon Sep 17 00:00:00 2001 From: Hemanth Puranik Date: Fri, 18 May 2018 08:59:29 +0530 Subject: net: qcom/emac: Allocate buffers from local node Currently we use non-NUMA aware allocation for TPD and RRD buffers, this patch modifies to use NUMA friendly allocation. Signed-off-by: Hemanth Puranik Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 092718a03786..031f6e6ee9c1 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -683,10 +683,11 @@ static int emac_tx_q_desc_alloc(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) { struct emac_ring_header *ring_header = &adpt->ring_header; + int node = dev_to_node(adpt->netdev->dev.parent); size_t size; size = sizeof(struct emac_buffer) * tx_q->tpd.count; - tx_q->tpd.tpbuff = kzalloc(size, GFP_KERNEL); + tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node); if (!tx_q->tpd.tpbuff) return -ENOMEM; @@ -723,11 +724,12 @@ static void emac_rx_q_bufs_free(struct emac_adapter *adpt) static int emac_rx_descs_alloc(struct emac_adapter *adpt) { struct emac_ring_header *ring_header = &adpt->ring_header; + int node = dev_to_node(adpt->netdev->dev.parent); struct emac_rx_queue *rx_q = &adpt->rx_q; size_t size; size = sizeof(struct emac_buffer) * rx_q->rfd.count; - rx_q->rfd.rfbuff = kzalloc(size, GFP_KERNEL); + rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node); if (!rx_q->rfd.rfbuff) return -ENOMEM; -- cgit v1.2.3 From f0b99e3ab323c72c480140532b0526f087a08c23 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Fri, 18 May 2018 11:58:30 -0700 Subject: Revert "ixgbe: release lock for the duration of ixgbe_suspend_close()" This reverts commit 6710f970d9979d8f03f6e292bb729b2ee1526d0e. Gotta love when developers have offline discussions, thinking everyone is reading their responses/dialog. The change had the potential for a number of race conditions on shutdown, which is why we are reverting the change. Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5ddfb93ed491..a52d92e182ee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6698,15 +6698,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) rtnl_lock(); netif_device_detach(netdev); - if (netif_running(netdev)) { - /* Suspend takes a long time, device_shutdown may be - * parallelized this function, so drop lock for the - * duration of this call. - */ - rtnl_unlock(); + if (netif_running(netdev)) ixgbe_close_suspend(adapter); - rtnl_lock(); - } ixgbe_clear_interrupt_scheme(adapter); rtnl_unlock(); -- cgit v1.2.3 From 52f8560ee4684f8689085817225786dba3399f6f Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 19 May 2018 10:29:33 +0200 Subject: r8169: fix network error on resume from suspend This commit removed calls to rtl_set_rx_mode(). This is ok for the standard path if the link is brought up, however it breaks system resume from suspend. Link comes up but no network traffic. Meanwhile common code from rtl_hw_start_8169/8101/8168() was moved to rtl_hw_start(), therefore re-add the call to rtl_set_rx_mode() there. Due to adding this call we have to move definition of rtl_hw_start() after definition of rtl_set_rx_mode(). Signed-off-by: Heiner Kallweit Fixes: 82d3ff6dd199 ("r8169: remove calls to rtl_set_rx_mode") Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 39 ++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2c2f0c5b3f94..75dfac0248f4 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5064,25 +5064,6 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); } -static void rtl_hw_start(struct rtl8169_private *tp) -{ - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - - tp->hw_start(tp); - - rtl_set_rx_max_size(tp); - rtl_set_rx_tx_desc_registers(tp); - rtl_set_rx_tx_config_registers(tp); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - - /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ - RTL_R8(tp, IntrMask); - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - /* no early-rx interrupts */ - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); - rtl_irq_enable_all(tp); -} - static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) { static const struct rtl_cfg2_info { @@ -5160,6 +5141,26 @@ static void rtl_set_rx_mode(struct net_device *dev) RTL_W32(tp, RxConfig, tmp); } +static void rtl_hw_start(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + + tp->hw_start(tp); + + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_set_rx_tx_config_registers(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ + RTL_R8(tp, IntrMask); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_mode(tp->dev); + /* no early-rx interrupts */ + RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); + rtl_irq_enable_all(tp); +} + static void rtl_hw_start_8169(struct rtl8169_private *tp) { if (tp->mac_version == RTL_GIGA_MAC_VER_05) -- cgit v1.2.3 From 743e1a8438e5a37f9ab1846e75a47de8e9cd522f Mon Sep 17 00:00:00 2001 From: Salil Mehta Date: Sat, 19 May 2018 16:53:15 +0100 Subject: net: hns3: Fixes error reported by Kbuild and internal review This patch fixes the error reported by Intel's kbuild and fixes a return value in one of the legs, caught during review of the original patch sent by kbuild. Fixes: fdb793670a00 ("net: hns3: Add support of .sriov_configure in HNS3 driver") Signed-off-by: Fengguang Wu Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index e85ff3805c96..e75c65270222 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1579,7 +1579,7 @@ static void hns3_remove(struct pci_dev *pdev) * Enable or change the number of VFs. Called when the user updates the number * of VFs in sysfs. 
**/ -int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { int ret; @@ -1592,6 +1592,8 @@ int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) ret = pci_enable_sriov(pdev, num_vfs); if (ret) dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); + else + return num_vfs; } else if (!pci_vfs_assigned(pdev)) { pci_disable_sriov(pdev); } else { -- cgit v1.2.3 From e63cd65f435c77eb2bc0c4f6691be350cd749dce Mon Sep 17 00:00:00 2001 From: Peng Li Date: Sat, 19 May 2018 16:53:16 +0100 Subject: net: hns3: Fixes API to fetch ethernet header length with kernel default During the RX leg driver needs to fetch the ethernet header length from the RX'ed Buffer Descriptor. Currently, proprietary version hns3_nic_get_headlen is being used to fetch the header length which uses l234info present in the Buffer Descriptor which might not be valid for the first Buffer Descriptor if the packet is spanning across multiple descriptors. Kernel default eth_get_headlen API does the job correctly. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Peng Li Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 104 +----------------------- 1 file changed, 2 insertions(+), 102 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index e75c65270222..66cbb66b2326 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1954,106 +1954,6 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); } -/* hns3_nic_get_headlen - determine size of header for LRO/GRO - * @data: pointer to the start of the headers - * @max: total length of section to find headers in - * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, GRO, and RSC offloads. The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. 
- */ -static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, - unsigned int max_size) -{ - unsigned char *network; - u8 hlen; - - /* This should never happen, but better safe than sorry */ - if (max_size < ETH_HLEN) - return max_size; - - /* Initialize network frame pointer */ - network = data; - - /* Set first protocol and move network header forward */ - network += ETH_HLEN; - - /* Handle any vlan tag if present */ - if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) - == HNS3_RX_FLAG_VLAN_PRESENT) { - if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) - return max_size; - - network += VLAN_HLEN; - } - - /* Handle L3 protocols */ - if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) - == HNS3_RX_FLAG_L3ID_IPV4) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct iphdr))) - return max_size; - - /* Access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (network[0] & 0x0F) << 2; - - /* Verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return network - data; - - /* Record next protocol if header is present */ - } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) - == HNS3_RX_FLAG_L3ID_IPV6) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct ipv6hdr))) - return max_size; - - /* Record next protocol */ - hlen = sizeof(struct ipv6hdr); - } else { - return network - data; - } - - /* Relocate pointer to start of L4 header */ - network += hlen; - - /* Finally sort out TCP/UDP */ - if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) - == HNS3_RX_FLAG_L4ID_TCP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct tcphdr))) - return max_size; - - /* Access doff as a u8 to avoid unaligned access on ia64 */ - hlen = (network[12] & 0xF0) >> 2; - - /* Verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct tcphdr)) - return network - data; - - network += hlen; - } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) - == HNS3_RX_FLAG_L4ID_UDP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct udphdr))) - return max_size; - - network += sizeof(struct udphdr); - } - - /* If everything has gone correctly network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. - */ - if ((typeof(max_size))(network - data) < max_size) - return network - data; - else - return max_size; -} - static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) @@ -2253,8 +2153,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ring->stats.seg_pkt_cnt++; u64_stats_update_end(&ring->syncp); - pull_len = hns3_nic_get_headlen(va, l234info, - HNS3_RX_HEAD_SIZE); + pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); -- cgit v1.2.3 From 99a6993a6933acbf791d1c7bb789e0ebe5bd7127 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Sat, 19 May 2018 16:53:17 +0100 Subject: net: hns3: cleanup of return values in hclge_init_client_instance() Removes the goto and directly returns in case of errors as part of the cleanup. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 46435c85b160..46e030c41fe9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5297,7 +5297,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, vport->nic.client = client; ret = client->ops->init_instance(&vport->nic); if (ret) - goto err; + return ret; if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { @@ -5305,11 +5305,11 @@ static int hclge_init_client_instance(struct hnae3_client *client, ret = hclge_init_roce_base_info(vport); if (ret) - goto err; + return ret; ret = rc->ops->init_instance(&vport->roce); if (ret) - goto err; + return ret; } break; @@ -5319,7 +5319,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, ret = client->ops->init_instance(&vport->nic); if (ret) - goto err; + return ret; break; case HNAE3_CLIENT_ROCE: @@ -5331,18 +5331,16 @@ static int hclge_init_client_instance(struct hnae3_client *client, if (hdev->roce_client && hdev->nic_client) { ret = hclge_init_roce_base_info(vport); if (ret) - goto err; + return ret; ret = client->ops->init_instance(&vport->roce); if (ret) - goto err; + return ret; } } } return 0; -err: - return ret; } static void hclge_uninit_client_instance(struct hnae3_client *client, -- cgit v1.2.3 From 13562d1f5e2fbe2cf33b23a00abca3f71264c4ac Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Sat, 19 May 2018 16:53:18 +0100 Subject: net: hns3: Fix the missing client list node initialization This patch fixes the missing initialization of the client list node in the hnae3_register_client() function. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Xi Wang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 66cbb66b2326..701ae5e79219 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3509,6 +3509,8 @@ static int __init hns3_init_module(void) client.ops = &client_ops; + INIT_LIST_HEAD(&client.node); + ret = hnae3_register_client(&client); if (ret) return ret; -- cgit v1.2.3 From 3c7624d8fc0b893b644b945ab904c629ebc9611e Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Sat, 19 May 2018 16:53:19 +0100 Subject: net: hns3: Fix for hns3 module is loaded multiple times problem If the hns3 driver has been built into kernel and then loaded with the same driver which built as KLM, it may trigger an error like below: [ 20.009555] hns3: Hisilicon Ethernet Network Driver for Hip08 Family - version [ 20.016789] hns3: Copyright (c) 2017 Huawei Corporation. [ 20.022100] Error: Driver 'hns3' is already registered, aborting... [ 23.517397] Unable to handle kernel NULL pointer dereference at virtual address 00000000 ... 
[ 23.691583] Process insmod (pid: 1982, stack limit = 0x00000000cd5f21cb) [ 23.698270] Call trace: [ 23.700705] __list_del_entry_valid+0x2c/0xd8 [ 23.705049] hnae3_unregister_client+0x68/0xa8 [ 23.709487] hns3_init_module+0x98/0x1000 [hns3] [ 23.714093] do_one_initcall+0x5c/0x170 [ 23.717918] do_init_module+0x64/0x1f4 [ 23.721654] load_module+0x1d14/0x24b0 [ 23.725390] SyS_init_module+0x158/0x208 [ 23.729300] el0_svc_naked+0x30/0x34 This patch fixes it by adding module version info. Fixes: 38caee9d3ee8 ("net: hns3: Add support of the HNAE3 framework") Signed-off-by: Xi Wang Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 1 + drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 ++ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 1 + drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 ++ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2 +- 6 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index bd3c232234d7..63d7dbfb90bf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -285,3 +285,4 @@ EXPORT_SYMBOL(hnae3_unregister_ae_dev); MODULE_AUTHOR("Huawei Tech. Co., Ltd."); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework"); +MODULE_VERSION(HNAE3_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 2f266ef96582..45c571eea2ae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -36,6 +36,8 @@ #include #include +#define HNAE3_MOD_VERSION "1.0" + /* Device IDs */ #define HNAE3_DEV_ID_GE 0xA220 #define HNAE3_DEV_ID_25GE 0xA221 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 701ae5e79219..cac51954f2cf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3538,3 +3538,4 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); MODULE_LICENSE("GPL"); MODULE_ALIAS("pci:hns-nic"); +MODULE_VERSION(HNS3_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 98cdbd3a1163..5b40f5a53761 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -14,6 +14,8 @@ #include "hnae3.h" +#define HNS3_MOD_VERSION "1.0" + extern const char hns3_driver_version[]; enum hns3_nic_state { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index af736a44607c..93177d91eea4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -17,7 +17,7 @@ #include "hclge_cmd.h" #include "hnae3.h" -#define HCLGE_MOD_VERSION "v1.0" +#define HCLGE_MOD_VERSION "1.0" #define HCLGE_DRIVER_NAME "hclge" #define HCLGE_INVALID_VPORT 0xffff diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index a477a7c36bbd..9763e742e6fb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -9,7 +9,7 @@ #include "hclgevf_cmd.h" #include "hnae3.h" -#define HCLGEVF_MOD_VERSION "v1.0" +#define HCLGEVF_MOD_VERSION "1.0" #define HCLGEVF_DRIVER_NAME "hclgevf" #define HCLGEVF_ROCEE_VECTOR_NUM 0 -- cgit v1.2.3 From 42687543ca18c106ffd562b40199bd31ab3a347a Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Sat, 19 May 2018 16:53:20 +0100 Subject: net: hns3: Use enums instead of magic number in hclge_is_special_opcode This patch does bit of a clean-up by using already defined enums for certain values in function hclge_is_special_opcode(). Below enums from have been used as replacements for magic values: enum hclge_opcode_type{ HCLGE_OPC_STATS_64_BIT = 0x0030, HCLGE_OPC_STATS_32_BIT = 0x0031, HCLGE_OPC_STATS_MAC = 0x0032, }; Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index fab70683bbf7..59fb0eb755e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -190,7 +190,11 @@ static int hclge_cmd_csq_done(struct hclge_hw *hw) static bool hclge_is_special_opcode(u16 opcode) { - u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032}; + /* these commands have several descriptors, + * and use the first one to save opcode and return value + */ + u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC}; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { -- cgit v1.2.3 From f30dfddcca1b7773ef8ed8ddbf2d477bc5eb9f3e Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Sat, 19 May 2018 16:53:21 +0100 Subject: net: hns3: Fix for netdev not running problem after calling net_stop and net_open The link status update function is called by timer every second. But net_stop and net_open may be called with very short intervals. The link status update function can not detect the link state has changed. It causes the netdev not running problem. This patch fixes it by updating the link state in ae_stop function. 
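The pattern at issue, as a minimal hypothetical sketch (not the hns3 code; every demo_* name below is invented for illustration): a periodic service timer schedules a worker that polls link state, and the stop path must quiesce both the timer and the work and then perform one final synchronous link update so that a fast stop/open cycle cannot slip past the once-per-second poll.

/*
 * Minimal sketch only -- hypothetical demo_* names, not the hns3 driver.
 * Shows a timer-driven link poll and a stop path that quiesces the
 * deferred work before syncing the link state one last time.
 */
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct net_device *ndev;
	struct timer_list service_timer;
	struct work_struct service_task;
};

static bool demo_hw_link_up(struct demo_priv *priv)
{
	/* hypothetical: a real driver would read a link-status register */
	return true;
}

static void demo_update_link_status(struct demo_priv *priv)
{
	if (demo_hw_link_up(priv))
		netif_carrier_on(priv->ndev);
	else
		netif_carrier_off(priv->ndev);
}

static void demo_service_task(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv,
					      service_task);

	demo_update_link_status(priv);
	mod_timer(&priv->service_timer, jiffies + HZ);	/* poll again in 1s */
}

static void demo_service_timer(struct timer_list *t)
{
	struct demo_priv *priv = from_timer(priv, t, service_timer);

	schedule_work(&priv->service_task);
}

static void demo_start(struct demo_priv *priv)
{
	INIT_WORK(&priv->service_task, demo_service_task);
	timer_setup(&priv->service_timer, demo_service_timer, 0);
	mod_timer(&priv->service_timer, jiffies + HZ);
}

static void demo_stop(struct demo_priv *priv)
{
	del_timer_sync(&priv->service_timer);
	cancel_work_sync(&priv->service_task);
	/* one final synchronous update so a quick stop/open is not missed */
	demo_update_link_status(priv);
}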
Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 46e030c41fe9..2f0bbb6708b9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3793,6 +3793,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle) /* reset tqp stats */ hclge_reset_tqp_stats(handle); + del_timer_sync(&hdev->service_timer); + cancel_work_sync(&hdev->service_task); + hclge_update_link_status(hdev); } static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, -- cgit v1.2.3 From 7bb572fddd757512130afcc0a8a683095eee1cd5 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Sat, 19 May 2018 16:53:22 +0100 Subject: net: hns3: Fixes kernel panic issue during rmmod hns3 driver If CONFIG_ARM_SMMU_V3 is enabled, arm64's dma_ops will replace arm64_swiotlb_dma_ops with iommu_dma_ops. When releasing contiguous dma memory, the new ops will call the vunmap function which cannot be run in interrupt context. Currently, spin_lock_bh is called before vunmap is executed. This disables BH and causes the interrupt context to be detected to generate a kernel panic like below: [ 2831.573400] kernel BUG at mm/vmalloc.c:1621! [ 2831.577659] Internal error: Oops - BUG: 0 [#1] PREEMPT SMP ... [ 2831.699907] Process rmmod (pid: 1893, stack limit = 0x0000000055103ee2) [ 2831.706507] Call trace: [ 2831.708941] vunmap+0x48/0x50 [ 2831.711897] dma_common_free_remap+0x78/0x88 [ 2831.716155] __iommu_free_attrs+0xa8/0x1c0 [ 2831.720255] hclge_free_cmd_desc+0xc8/0x118 [hclge] [ 2831.725128] hclge_destroy_cmd_queue+0x34/0x68 [hclge] [ 2831.730261] hclge_uninit_ae_dev+0x90/0x100 [hclge] [ 2831.735127] hnae3_unregister_ae_dev+0xb0/0x868 [hnae3] [ 2831.740345] hns3_remove+0x3c/0x90 [hns3] [ 2831.744344] pci_device_remove+0x48/0x108 [ 2831.748342] device_release_driver_internal+0x164/0x200 [ 2831.753553] driver_detach+0x4c/0x88 [ 2831.757116] bus_remove_driver+0x60/0xc0 [ 2831.761026] driver_unregister+0x34/0x60 [ 2831.764935] pci_unregister_driver+0x30/0xb0 [ 2831.769197] hns3_exit_module+0x10/0x978 [hns3] [ 2831.773715] SyS_delete_module+0x1f8/0x248 [ 2831.777799] el0_svc_naked+0x30/0x34 This patch fixes it by using spin_lock instead of spin_lock_bh. Fixes: 68c0a5c70614 ("net: hns3: Add HNS3 IMP(Integrated Mgmt Proc) Cmd Interface Support") Signed-off-by: Xi Wang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 59fb0eb755e7..c36d64710fa6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -385,9 +385,9 @@ int hclge_cmd_init(struct hclge_dev *hdev) static void hclge_destroy_queue(struct hclge_cmq_ring *ring) { - spin_lock_bh(&ring->lock); + spin_lock(&ring->lock); hclge_free_cmd_desc(ring); - spin_unlock_bh(&ring->lock); + spin_unlock(&ring->lock); } void hclge_destroy_cmd_queue(struct hclge_hw *hw) -- cgit v1.2.3 From eddf04626d1d6d0bcd01ac6a287e49f5ddb90a26 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Sat, 19 May 2018 16:53:23 +0100 Subject: net: hns3: Fix for CMDQ and Misc. interrupt init order problem When vf module is loading, the cmd queue initialization should happen before misc interrupt initialization, otherwise the misc interrupt handle will cause using uninitialized cmd queue problem. There is also the same issue when vf module is unloading. This patch fixes it by adjusting the location of some function. Fixes: e2cb1dec9779 ("net: hns3: Add HNS3 VF HCL(Hardware Compatibility Layer) Support") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index f1f4a17340a5..2b0e3295989f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1634,6 +1634,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) hclgevf_state_init(hdev); + ret = hclgevf_cmd_init(hdev); + if (ret) + goto err_cmd_init; + ret = hclgevf_misc_irq_init(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", @@ -1641,10 +1645,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_misc_irq_init; } - ret = hclgevf_cmd_init(hdev); - if (ret) - goto err_cmd_init; - ret = hclgevf_configure(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); @@ -1692,10 +1692,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) return 0; err_config: - hclgevf_cmd_uninit(hdev); -err_cmd_init: hclgevf_misc_irq_uninit(hdev); err_misc_irq_init: + hclgevf_cmd_uninit(hdev); +err_cmd_init: hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); err_irq_init: @@ -1705,9 +1705,9 @@ err_irq_init: static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) { - hclgevf_cmd_uninit(hdev); - hclgevf_misc_irq_uninit(hdev); hclgevf_state_uninit(hdev); + hclgevf_misc_irq_uninit(hdev); + hclgevf_cmd_uninit(hdev); hclgevf_uninit_msi(hdev); hclgevf_pci_uninit(hdev); } -- cgit v1.2.3 From 27164491cb82b611b5269e632e4af0664788794d Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 20 May 2018 00:02:36 +0300 Subject: sh_eth: fix typo in EESR.TRO bit name The correct name of the EESR bit 8 is TRO (transmit retry over), not RTO. Note that EESIPR bit 8, TROIP remained correct... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 10 +++++----- drivers/net/ethernet/renesas/sh_eth.h | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 83148ca61317..d71657dce281 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -706,7 +706,7 @@ static struct sh_eth_cpu_data rcar_gen1_data = { EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, @@ -738,7 +738,7 @@ static struct sh_eth_cpu_data rcar_gen2_data = { EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, @@ -774,7 +774,7 @@ static struct sh_eth_cpu_data r8a77980_data = { EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .tx_check = EESR_FTC | EESR_CD | EESR_RTO, + .tx_check = EESR_FTC | EESR_CD | EESR_TRO, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, @@ -831,7 +831,7 @@ static struct sh_eth_cpu_data sh7724_data = { EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, @@ -876,7 +876,7 @@ static struct sh_eth_cpu_data sh7757_data = { EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 7d5aaba1384a..9d9e80c33343 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -243,7 +243,7 @@ enum EESR_BIT { EESR_CND = 0x00000800, EESR_DLC = 0x00000400, EESR_CD = 0x00000200, - EESR_RTO = 0x00000100, + EESR_TRO = 0x00000100, EESR_RMAF = 0x00000080, EESR_CEEF = 0x00000040, EESR_CELF = 0x00000020, @@ -263,7 +263,7 @@ enum EESR_BIT { EESR_CERF) /* Recv frame CRC error */ #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ - EESR_RTO) + EESR_TRO) #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ EESR_TFE | EESR_TDE) -- cgit v1.2.3 From 9c59c9a8e94660d05790e47bd4a8c756254614b7 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 20 May 2018 00:03:42 +0300 Subject: sh_eth: fix comment grammar in 'struct sh_eth_cpu_data' All the verbs in the comments to the 'struct sh_eth_cpu_data' declaration should be in a 3rd person singular, to match the nouns. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 9d9e80c33343..ceb420e556a6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -499,21 +499,21 @@ struct sh_eth_cpu_data { /* hardware features */ unsigned long irq_flags; /* IRQ configuration flags */ - unsigned no_psr:1; /* EtherC DO NOT have PSR */ - unsigned apr:1; /* EtherC have APR */ - unsigned mpr:1; /* EtherC have MPR */ - unsigned tpauser:1; /* EtherC have TPAUSER */ - unsigned bculr:1; /* EtherC have BCULR */ - unsigned tsu:1; /* EtherC have TSU */ - unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */ + unsigned no_psr:1; /* EtherC DOES NOT have PSR */ + unsigned apr:1; /* EtherC has APR */ + unsigned mpr:1; /* EtherC has MPR */ + unsigned tpauser:1; /* EtherC has TPAUSER */ + unsigned bculr:1; /* EtherC has BCULR */ + unsigned tsu:1; /* EtherC has TSU */ + unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */ unsigned nbst:1; /* E-DMAC has NBST bit in EDMR */ - unsigned rpadir:1; /* E-DMAC have RPADIR */ - unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ - unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ + unsigned rpadir:1; /* E-DMAC has RPADIR */ + unsigned no_trimd:1; /* E-DMAC DOES NOT have TRIMD */ + unsigned no_ade:1; /* E-DMAC DOES NOT have ADE bit in EESR */ unsigned no_xdfar:1; /* E-DMAC DOES NOT have RDFAR/TDFAR */ unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */ unsigned hw_checksum:1; /* E-DMAC has CSMR */ - unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ + unsigned select_mii:1; /* EtherC has RMII_MII (MII select register) */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ -- cgit v1.2.3 From 6b14787ab11d8fc2225132f0fe8275531fdefc72 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 20 May 2018 00:05:02 +0300 Subject: sh_eth: fix typo in comment to BCULR write Simon has noticed a typo in the comment accompaining the BCULR write -- fix it and move the comment before the write (following the style of the other comments), while at it... Reported-by: Simon Horman Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d71657dce281..d9cadfb1bc4a 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1481,8 +1481,9 @@ static int sh_eth_dev_init(struct net_device *ndev) if (mdp->cd->nbst) sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST); + /* Burst cycle count upper-limit */ if (mdp->cd->bculr) - sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ + sh_eth_write(ndev, 0x800, BCULR); sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); -- cgit v1.2.3 From 577941eb56fe5fd95e1a60c6dba3dfaf606e3fde Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 19 May 2018 22:31:33 +0200 Subject: net: dsa: mv88e6xxx: Remove OF check for IRQ domain An IRQ domain will work without an OF node. It is not possible to reference interrupts via a phandle, but C code can still use irq_find_mapping() to get an interrupt from the domain. 
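As a minimal hypothetical sketch of that point (invented demo_* names, not the mv88e6xxx code): the domain is created with whatever dev->of_node happens to be -- possibly NULL -- mappings are created per hardware IRQ, and irq_find_mapping() later resolves a hwirq to a Linux virq with no device tree involvement.

/*
 * Minimal sketch only -- hypothetical demo_* names, not mv88e6xxx code.
 * An IRQ domain created with a NULL of_node still works; consumers in
 * the same driver resolve hwirqs with irq_find_mapping().
 */
#include <linux/device.h>
#include <linux/irqdomain.h>

#define DEMO_NUM_HWIRQS	16

static struct irq_domain *demo_irq_domain_setup(struct device *dev)
{
	struct irq_domain *domain;
	unsigned int hwirq;

	/* dev->of_node may be NULL, e.g. for a platform_data based probe */
	domain = irq_domain_add_simple(dev->of_node, DEMO_NUM_HWIRQS, 0,
				       &irq_domain_simple_ops, NULL);
	if (!domain)
		return NULL;

	for (hwirq = 0; hwirq < DEMO_NUM_HWIRQS; hwirq++)
		irq_create_mapping(domain, hwirq);

	return domain;
}

/* later, e.g. when wiring up a child device or PHY interrupt: */
static unsigned int demo_hwirq_to_virq(struct irq_domain *domain,
				       unsigned int hwirq)
{
	return irq_find_mapping(domain, hwirq);
}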
Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/global2.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index f9bde011a3e6..91a3cb2452ac 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -1047,9 +1047,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) { int err, irq, virq; - if (!chip->dev->of_node) - return -EINVAL; - chip->g2_irq.domain = irq_domain_add_simple( chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip); if (!chip->g2_irq.domain) -- cgit v1.2.3 From 877b7cb0b6f283593a663134ee52703f12c895cc Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 19 May 2018 22:31:34 +0200 Subject: net: dsa: mv88e6xxx: Add minimal platform_data support Not all the world uses device tree. Some parts of the world still use platform devices and platform data. Add basic support for probing a Marvell switch via platform data. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- MAINTAINERS | 1 + drivers/net/dsa/mv88e6xxx/chip.c | 56 ++++++++++++++++++++++++++++----- include/linux/platform_data/mv88e6xxx.h | 17 ++++++++++ 3 files changed, 67 insertions(+), 7 deletions(-) create mode 100644 include/linux/platform_data/mv88e6xxx.h (limited to 'drivers/net') diff --git a/MAINTAINERS b/MAINTAINERS index 658880464b9d..9f2045a5adac 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8466,6 +8466,7 @@ M: Vivien Didelot L: netdev@vger.kernel.org S: Maintained F: drivers/net/dsa/mv88e6xxx/ +F: linux/platform_data/mv88e6xxx.h F: Documentation/devicetree/bindings/net/dsa/marvell.txt MARVELL ARMADA DRM SUPPORT diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 2bb3f03ee1cb..5b40382036ea 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -4350,6 +4351,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) return -ENOMEM; ds->priv = chip; + ds->dev = dev; ds->ops = &mv88e6xxx_switch_ops; ds->ageing_time_min = chip->info->age_time_coeff; ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX; @@ -4364,36 +4366,73 @@ static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) dsa_unregister_switch(chip->ds); } +static const void *pdata_device_get_match_data(struct device *dev) +{ + const struct of_device_id *matches = dev->driver->of_match_table; + const struct dsa_mv88e6xxx_pdata *pdata = dev->platform_data; + + for (; matches->name[0] || matches->type[0] || matches->compatible[0]; + matches++) { + if (!strcmp(pdata->compatible, matches->compatible)) + return matches->data; + } + return NULL; +} + static int mv88e6xxx_probe(struct mdio_device *mdiodev) { + struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data; struct device *dev = &mdiodev->dev; struct device_node *np = dev->of_node; const struct mv88e6xxx_info *compat_info; struct mv88e6xxx_chip *chip; u32 eeprom_len; + int port; int err; - compat_info = of_device_get_match_data(dev); + if (np) + compat_info = of_device_get_match_data(dev); + + if (pdata) { + compat_info = pdata_device_get_match_data(dev); + + if (!pdata->netdev) + return -EINVAL; + + for (port = 0; port < DSA_MAX_PORTS; port++) { + if (!(pdata->enabled_ports & (1 << port))) + continue; + if (strcmp(pdata->cd.port_names[port], "cpu")) + 
continue; + pdata->cd.netdev[port] = &pdata->netdev->dev; + break; + } + } + if (!compat_info) return -EINVAL; chip = mv88e6xxx_alloc_chip(dev); - if (!chip) - return -ENOMEM; + if (!chip) { + err = -ENOMEM; + goto out; + } chip->info = compat_info; err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); if (err) - return err; + goto out; chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(chip->reset)) - return PTR_ERR(chip->reset); + if (IS_ERR(chip->reset)) { + err = PTR_ERR(chip->reset); + goto out; + } err = mv88e6xxx_detect(chip); if (err) - return err; + goto out; mv88e6xxx_phy_init(chip); @@ -4468,6 +4507,9 @@ out_g1_irq: mv88e6xxx_irq_poll_free(chip); mutex_unlock(&chip->reg_lock); out: + if (pdata) + dev_put(pdata->netdev); + return err; } diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h new file mode 100644 index 000000000000..88e91e05f48f --- /dev/null +++ b/include/linux/platform_data/mv88e6xxx.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DSA_MV88E6XXX_H +#define __DSA_MV88E6XXX_H + +#include + +struct dsa_mv88e6xxx_pdata { + /* Must be first, such that dsa_register_switch() can access this + * without gory pointer manipulations + */ + struct dsa_chip_data cd; + const char *compatible; + unsigned int enabled_ports; + struct net_device *netdev; +}; + +#endif -- cgit v1.2.3 From 00baabe5286c41d21ed4a78f479b021eba1f0d51 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 19 May 2018 22:31:35 +0200 Subject: net: dsa: mv88e6xxx: Add support for EEPROM via platform data Add the size of the EEPROM to the platform data, so it can also be instantiated by a platform device. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 11 +++++++---- drivers/net/dsa/mv88e6xxx/chip.h | 2 +- include/linux/platform_data/mv88e6xxx.h | 1 + 3 files changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5b40382036ea..1fa1f820a437 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -4386,7 +4386,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) struct device_node *np = dev->of_node; const struct mv88e6xxx_info *compat_info; struct mv88e6xxx_chip *chip; - u32 eeprom_len; int port; int err; @@ -4436,9 +4435,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) mv88e6xxx_phy_init(chip); - if (chip->info->ops->get_eeprom && - !of_property_read_u32(np, "eeprom-length", &eeprom_len)) - chip->eeprom_len = eeprom_len; + if (chip->info->ops->get_eeprom) { + if (np) + of_property_read_u32(np, "eeprom-length", + &chip->eeprom_len); + else + chip->eeprom_len = pdata->eeprom_len; + } mutex_lock(&chip->reg_lock); err = mv88e6xxx_switch_reset(chip); diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 012268046442..8ac3fbb15352 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -238,7 +238,7 @@ struct mv88e6xxx_chip { struct gpio_desc *reset; /* set to size of eeprom if supported by the switch */ - int eeprom_len; + u32 eeprom_len; /* List of mdio busses */ struct list_head mdios; diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h index 88e91e05f48f..f63af2955ea0 100644 --- a/include/linux/platform_data/mv88e6xxx.h +++ b/include/linux/platform_data/mv88e6xxx.h @@ -12,6 +12,7 @@ struct dsa_mv88e6xxx_pdata { const char *compatible; unsigned int enabled_ports; struct net_device *netdev; + u32 eeprom_len; }; #endif -- cgit v1.2.3 From 7ddae24f97f9fc8f151aae75f0371ced06a8cd99 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 20 May 2018 19:04:24 -0400 Subject: mv88e6xxx: Fix uninitialized variable warning. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In mv88e6xxx_probe(), ("np" or "pdata") might be an invariant but GCC can't see that, therefore: drivers/net/dsa/mv88e6xxx/chip.c: In function ‘mv88e6xxx_probe’: drivers/net/dsa/mv88e6xxx/chip.c:4420:13: warning: ‘compat_info’ may be used uninitialized in this function [-Wmaybe-uninitialized] chip->info = compat_info; Actually, it should have warned on the "if (!compat_info)" test, but whatever. Explicitly initialize to NULL in the variable declaration to deal with this. Fixes: 877b7cb0b6f2 ("net: dsa: mv88e6xxx: Add minimal platform_data support") Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 1fa1f820a437..12df00f593b7 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -4382,9 +4382,9 @@ static const void *pdata_device_get_match_data(struct device *dev) static int mv88e6xxx_probe(struct mdio_device *mdiodev) { struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data; + const struct mv88e6xxx_info *compat_info = NULL; struct device *dev = &mdiodev->dev; struct device_node *np = dev->of_node; - const struct mv88e6xxx_info *compat_info; struct mv88e6xxx_chip *chip; int port; int err; -- cgit v1.2.3 From 3bcd47726c3b744fd08781795cca905cc59a1382 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 20 May 2018 20:49:47 -0700 Subject: net: phy: phylink: Don't release NULL GPIO If CONFIG_GPIOLIB is disabled, gpiod_put() becomes a stub that produces a warning, this helped identify that we could be attempting to release a NULL pl->link_gpio GPIO descriptor, so guard against that. Fixes: daab3349ad1a ("net: phy: phylink: Release link GPIO") Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 581ce93ecaf9..af4dc4425be2 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -624,7 +624,7 @@ void phylink_destroy(struct phylink *pl) { if (pl->sfp_bus) sfp_unregister_upstream(pl->sfp_bus); - if (!IS_ERR(pl->link_gpio)) + if (!IS_ERR_OR_NULL(pl->link_gpio)) gpiod_put(pl->link_gpio); cancel_work_sync(&pl->resolve); -- cgit v1.2.3 From 6c541b4595a28b204888db75aba1966ede4a6184 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 20 May 2018 20:58:28 -0700 Subject: net: ethernet: Sort Kconfig sourcing alphabetically A number of entries were not alphabetically sorted, remedy that. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/Kconfig | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 54d71e1c48d5..af766fd61151 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -33,9 +33,9 @@ source "drivers/net/ethernet/aquantia/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/aurora/Kconfig" -source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/brocade/Kconfig" +source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/calxeda/Kconfig" source "drivers/net/ethernet/cavium/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" @@ -72,16 +72,16 @@ source "drivers/net/ethernet/dec/Kconfig" source "drivers/net/ethernet/dlink/Kconfig" source "drivers/net/ethernet/emulex/Kconfig" source "drivers/net/ethernet/ezchip/Kconfig" -source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/faraday/Kconfig" source "drivers/net/ethernet/freescale/Kconfig" source "drivers/net/ethernet/fujitsu/Kconfig" source "drivers/net/ethernet/hisilicon/Kconfig" source "drivers/net/ethernet/hp/Kconfig" source "drivers/net/ethernet/huawei/Kconfig" +source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" -source "drivers/net/ethernet/i825xx/Kconfig" +source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" config JME @@ -114,8 +114,8 @@ source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" -source "drivers/net/ethernet/mscc/Kconfig" source "drivers/net/ethernet/moxa/Kconfig" +source "drivers/net/ethernet/mscc/Kconfig" source "drivers/net/ethernet/myricom/Kconfig" config FEALNX @@ -161,20 +161,21 @@ source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" +source "drivers/net/ethernet/rdc/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" -source "drivers/net/ethernet/rdc/Kconfig" source "drivers/net/ethernet/rocker/Kconfig" source "drivers/net/ethernet/samsung/Kconfig" source "drivers/net/ethernet/seeq/Kconfig" -source "drivers/net/ethernet/silan/Kconfig" -source "drivers/net/ethernet/sis/Kconfig" source "drivers/net/ethernet/sfc/Kconfig" source "drivers/net/ethernet/sgi/Kconfig" +source "drivers/net/ethernet/silan/Kconfig" +source "drivers/net/ethernet/sis/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/socionext/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" +source "drivers/net/ethernet/synopsys/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/toshiba/Kconfig" @@ -183,6 +184,5 @@ source "drivers/net/ethernet/via/Kconfig" source "drivers/net/ethernet/wiznet/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" -source "drivers/net/ethernet/synopsys/Kconfig" endif # ETHERNET -- cgit v1.2.3 From a6076fcd187a1cb4900cf970a04401957b4b4ab8 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar 
Date: Mon, 21 May 2018 12:26:36 +0530 Subject: cxgb4: copy the length of cpl_tx_pkt_core to fw_wr immdlen field of FW_ETH_TX_PKT_WR is filled in a wrong way, we must copy the length of all the cpls encapsulated in fw work request. In the xmit path we missed adding the length of CPL_TX_PKT_CORE but we added the length of WR_HDR and it worked because WR_HDR and CPL_TX_PKT_CORE are of same length. Add the length of cpl_tx_pkt_core not WR_HDR's. This also fixes the lso cpl errors for udp tunnels Fixes: d0a1299c6bf7 ("cxgb4: add support for vxlan segmentation offload") Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 0f87e973a158..276f22357f81 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1411,8 +1411,9 @@ out_free: dev_kfree_skb_any(skb); end = (u64 *)wr + flits; len = immediate ? skb->len : 0; + len += sizeof(*cpl); if (ssi->gso_size) { - struct cpl_tx_pkt_lso *lso = (void *)wr; + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; int l3hdr_len = skb_network_header_len(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; @@ -1442,20 +1443,19 @@ out_free: dev_kfree_skb_any(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) cntrl = hwcsum(adap->params.chip, skb); } else { - lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | - LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | - LSO_IPV6_V(v6) | - LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | - LSO_IPHDR_LEN_V(l3hdr_len / 4) | - LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); - lso->c.ipid_ofst = htons(0); - lso->c.mss = htons(ssi->gso_size); - lso->c.seqno_offset = htonl(0); + lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | + LSO_IPV6_V(v6) | + LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | + LSO_IPHDR_LEN_V(l3hdr_len / 4) | + LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); + lso->ipid_ofst = htons(0); + lso->mss = htons(ssi->gso_size); + lso->seqno_offset = htonl(0); if (is_t4(adap->params.chip)) - lso->c.len = htonl(skb->len); + lso->len = htonl(skb->len); else - lso->c.len = - htonl(LSO_T5_XFER_SIZE_V(skb->len)); + lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); cpl = (void *)(lso + 1); if (CHELSIO_CHIP_VERSION(adap->params.chip) @@ -1484,7 +1484,6 @@ out_free: dev_kfree_skb_any(skb); q->tso++; q->tx_cso += ssi->gso_segs; } else { - len += sizeof(*cpl); if (ptp_enabled) op = FW_PTP_TX_PKT_WR; else @@ -1538,7 +1537,7 @@ out_free: dev_kfree_skb_any(skb); if (last_desc >= q->q.size) last_desc -= q->q.size; q->q.sdesc[last_desc].skb = skb; - q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl; } txq_advance(&q->q, ndesc); -- cgit v1.2.3 From b4eb7393683d70976a0736b37f1562e8421e3120 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 21 May 2018 11:45:51 -0700 Subject: ti: ethernet: cpdma: Use correct format for genpool_* Now that we can compile davinci_cpdma.c on 64-bit hosts, we can see that the format used for printing a size_t type is incorrect, use %zd accordingly. Fixes: aeec3021043b ("net: ethernet: ti: cpdma: remove used_desc counter") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 31ae04117f0a..9ec49213de5e 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -191,7 +191,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr) return; WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), - "cpdma_desc_pool size %d != avail %d", + "cpdma_desc_pool size %zd != avail %zd", gen_pool_size(pool->gen_pool), gen_pool_avail(pool->gen_pool)); if (pool->cpumap) -- cgit v1.2.3 From ea5ec9fc9eb8bb1eed5ff12dcec328dec918664f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 21 May 2018 11:45:52 -0700 Subject: net: ethernet: ti: cpts: Fix timestamp print On 64-bit hosts we will get the following warning: drivers/net/ethernet/ti/cpts.c: In function 'cpts_overflow_check': drivers/net/ethernet/ti/cpts.c:297:11: warning: format '%lld' expects argument of type 'long long int', but argument 3 has type '__kernel_time_t {aka long int}' [-Wformat=] pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); Fix this by using an appropriate casting that works on all bit sizes. Fixes: a5c79c26e168 ("ptp: cpts: convert to the 64 bit get/set time methods.") Fixes: 87c0e764d43a ("cpts: introduce time stamping code and a PTP hardware clock.") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpts.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 7842f094f2ef..6f63c8729afc 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -294,7 +294,8 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp) delay = CPTS_SKB_TX_WORK_TIMEOUT; spin_unlock_irqrestore(&cpts->lock, flags); - pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); + pr_debug("cpts overflow check at %lld.%09ld\n", + (long long)ts.tv_sec, ts.tv_nsec); return (long)delay; } -- cgit v1.2.3 From bf2ce3fdf314cd181908b55a4d6c58817674a062 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 21 May 2018 11:45:53 -0700 Subject: net: ethernet: ti: cpsw: Fix cpsw_add_ch_strings() printk format When building on a 64-bit host we will get the following warning: drivers/net/ethernet/ti/cpsw.c: In function 'cpsw_add_ch_strings': drivers/net/ethernet/ti/cpsw.c:1284:19: warning: format '%d' expects argument of type 'int', but argument 5 has type 'long unsigned int' [-Wformat=] "%s DMA chan %d: %s", rx_dir ? "Rx" : "Tx", ~^ %ld Fix this by using an %ld format and casting to long. Fixes: e05107e6b747 ("net: ethernet: ti: cpsw: add multi queue support") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index a7285dddfd29..643cd2d9dfb6 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1281,8 +1281,8 @@ static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir) for (i = 0; i < ch_stats_len; i++) { line = i % CPSW_STATS_CH_LEN; snprintf(*p, ETH_GSTRING_LEN, - "%s DMA chan %d: %s", rx_dir ? "Rx" : "Tx", - i / CPSW_STATS_CH_LEN, + "%s DMA chan %ld: %s", rx_dir ? 
"Rx" : "Tx", + (long)(i / CPSW_STATS_CH_LEN), cpsw_gstrings_ch_stats[line].stat_string); *p += ETH_GSTRING_LEN; } -- cgit v1.2.3 From 5a04e8f81a4f55ce1c2b7b525744a187c99ba302 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 21 May 2018 11:45:54 -0700 Subject: net: ethernet: davinci_emac: Fix printing of base address Use %pa which is the correct formatter to print a physical address, instead of %p which is just a pointer. Fixes: a6286ee630f6 ("net: Add TI DaVinci EMAC driver") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_emac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index abceea802ea1..be0fec17d95d 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1930,8 +1930,8 @@ static int davinci_emac_probe(struct platform_device *pdev) if (netif_msg_probe(priv)) { dev_notice(&pdev->dev, "DaVinci EMAC Probe found device " - "(regs: %p, irq: %d)\n", - (void *)priv->emac_base_phys, ndev->irq); + "(regs: %pa, irq: %d)\n", + &priv->emac_base_phys, ndev->irq); } pm_runtime_put(&pdev->dev); -- cgit v1.2.3 From c79c3850440c1535b845e96e7e5ff0139ba479e6 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 21 May 2018 11:45:55 -0700 Subject: ti: ethernet: davinci: Fix cast to int warnings Now that we can compile test this driver on 64-bit hosts, we get some warnings about how a pointer/address is written/read to/from a register (sw_token). Fix this by doing the appropriate conversions, we cannot possibly have the driver work on 64-bit hosts the way the tokens are managed though, since the registers being written to a 32-bit only. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 9ec49213de5e..cdbddf16dd29 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -1080,7 +1080,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, writel_relaxed(buffer, &desc->hw_buffer); writel_relaxed(len, &desc->hw_len); writel_relaxed(mode | len, &desc->hw_mode); - writel_relaxed(token, &desc->sw_token); + writel_relaxed((uintptr_t)token, &desc->sw_token); writel_relaxed(buffer, &desc->sw_buffer); writel_relaxed(len, &desc->sw_len); desc_read(desc, sw_len); @@ -1121,15 +1121,15 @@ static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t buff_dma; int origlen; - void *token; + uintptr_t token; - token = (void *)desc_read(desc, sw_token); + token = desc_read(desc, sw_token); buff_dma = desc_read(desc, sw_buffer); origlen = desc_read(desc, sw_len); dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir); cpdma_desc_free(pool, desc, 1); - (*chan->handler)(token, outlen, status); + (*chan->handler)((void *)token, outlen, status); } static int __cpdma_chan_process(struct cpdma_chan *chan) -- cgit v1.2.3 From bdf2752305af1f8c2c2422ae2b292194cf33f5e9 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 17 May 2018 01:08:32 -0700 Subject: i40e: free skb after clearing lock in ptp_stop Use the same logic to free the skb after clearing the Tx timestamp bit lock in i40e_ptp_stop as we use in the other locations. 
It is not as important here since we are not racing against a future Tx timestamp request (as we are disabling PTP at this point). However it is good to be consistent in how we approach the bit lock so that future callers don't copy the old anti-pattern. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index d50d84927e6b..35f2866b38c6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -799,9 +799,11 @@ void i40e_ptp_stop(struct i40e_pf *pf) pf->ptp_rx = false; if (pf->ptp_tx_skb) { - dev_kfree_skb_any(pf->ptp_tx_skb); + struct sk_buff *skb = pf->ptp_tx_skb; + pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); + dev_kfree_skb_any(skb); } if (pf->ptp_clock) { -- cgit v1.2.3 From 9955d4940e33442b9b2f1f255a63aac21f8ec3c3 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 17 May 2018 01:08:33 -0700 Subject: i40e: always return VEB stat strings The ethtool API for obtaining device statistics is not intended to allow runtime changes in the number of statistics reported. It may *appear* this way, as there is an ability to request the number of stats using ethtool_get_set_count(). However, it is expected that this must always return the same value for invocations of the same device. If we don't satisfy this contract, and allow the number of stats to change during run time, we could cause invalid memory accesses or report the stat strings incorrectly. This is because the API for obtaining stats is to (1) get the size, (2) get the strings and finally (3) get the stats. Since these are each separate ethtool op commands, it is not possible to maintain consistency by holding the RTNL lock over the whole operation. This results in the potential for a race condition to occur where the size changed between any of the 3 calls. Avoid this issue by requiring that we always return the same value for a given device. We can check any values which remain constant for the life of the device, but must not report different sizes depending on runtime attributes. This patch specifically fixes the VEB statistics strings to always be reported. Other issues will be fixed in future patches. 
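For illustration only (not part of this patch), the contract the ethtool core relies on looks roughly like the sketch below: the value reported by .get_sset_count must match what .get_strings and .get_ethtool_stats later fill in, so all three callbacks are driven by the same fixed table rather than by runtime state. The driver name, stat names and callbacks here are hypothetical placeholders, not i40e code:

  #include <linux/kernel.h>
  #include <linux/ethtool.h>
  #include <linux/netdevice.h>
  #include <linux/string.h>

  /* Hypothetical driver: the stat list is fixed for the life of the netdev. */
  static const char example_stat_names[][ETH_GSTRING_LEN] = {
          "rx_packets", "tx_packets", "resets",
  };
  #define EXAMPLE_N_STATS ARRAY_SIZE(example_stat_names)

  static int example_get_sset_count(struct net_device *dev, int sset)
  {
          /* Must return the same value for every invocation on this device. */
          return sset == ETH_SS_STATS ? EXAMPLE_N_STATS : -EOPNOTSUPP;
  }

  static void example_get_strings(struct net_device *dev, u32 sset, u8 *data)
  {
          if (sset == ETH_SS_STATS)
                  memcpy(data, example_stat_names, sizeof(example_stat_names));
  }

  static void example_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
  {
          /* Exactly EXAMPLE_N_STATS entries; report zero for inactive stats. */
          data[0] = 0;
          data[1] = 0;
          data[2] = 0;
  }

Because the count, string and stat queries arrive as three separate ethtool commands, nothing but this convention guarantees that the buffer sized from the first call is large enough for the other two.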
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 52 ++++++++++++-------------- 1 file changed, 23 insertions(+), 29 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 329e59eae4a1..de5dad7ff340 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1661,15 +1661,10 @@ static int i40e_get_stats_count(struct net_device *netdev) struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { - if (pf->lan_veb != I40E_NO_VEB && - pf->flags & I40E_FLAG_VEB_STATS_ENABLED) - return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL; - else - return I40E_PF_STATS_LEN(netdev); - } else { + if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) + return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL; + else return I40E_VSI_STATS_LEN(netdev); - } } static int i40e_get_sset_count(struct net_device *netdev, int sset) @@ -1760,6 +1755,8 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, data[i++] = veb->tc_stats.tc_rx_packets[j]; data[i++] = veb->tc_stats.tc_rx_bytes[j]; } + } else { + i += I40E_VEB_STATS_TOTAL; } for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { p = (char *)pf + i40e_gstrings_stats[j].stat_offset; @@ -1816,27 +1813,24 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; - if ((pf->lan_veb != I40E_NO_VEB) && - (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { - for (i = 0; i < I40E_VEB_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "veb.%s", - i40e_gstrings_veb_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%d_tx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%d_tx_bytes", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%d_rx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%d_rx_bytes", i); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < I40E_VEB_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "veb.%s", + i40e_gstrings_veb_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_tx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_tx_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_rx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_rx_bytes", i); + p += ETH_GSTRING_LEN; } for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { snprintf(p, ETH_GSTRING_LEN, "port.%s", -- cgit v1.2.3 From b8312365097ca854a03231bd58a284491aaa8ef7 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 17 May 2018 01:08:34 -0700 Subject: i40e: always return all queue stat strings The ethtool API for obtaining device statistics is not intended to allow runtime changes in the number of statistics reported. It may *appear* this way, as there is an ability to request the number of stats using ethtool_get_set_count(). However, it is expected that this must always return the same value for invocations of the same device. 
If we don't satisfy this contract, and allow the number of stats to change during run time, we could cause invalid memory accesses or report the stat strings incorrectly. This is because the API for obtaining stats is to (1) get the size, (2) get the strings and finally (3) get the stats. Since these are each separate ethtool op commands, it is not possible to maintain consistency by holding the RTNL lock over the whole operation. This results in the potential for a race condition to occur where the size changed between any of the 3 calls. Avoid this issue by requiring that we always return the same value for a given device. We can check any values which remain constant for the life of the device, but must not report different sizes depending on runtime attributes. This patch specifically fixes the queue statistics to always return every queue even if it's not currently in use. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index de5dad7ff340..bacb01b63727 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -140,8 +140,12 @@ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), }; -#define I40E_QUEUE_STATS_LEN(n) \ - (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \ +/* We use num_tx_queues here as a proxy for the maximum number of queues + * available because we always allocate queues symmetrically. + */ +#define I40E_MAX_NUM_QUEUES(n) ((n)->num_tx_queues) +#define I40E_QUEUE_STATS_LEN(n) \ + (I40E_MAX_NUM_QUEUES(n) \ * 2 /* Tx and Rx together */ \ * (sizeof(struct i40e_queue_stats) / sizeof(u64))) #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) @@ -1712,11 +1716,19 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } rcu_read_lock(); - for (j = 0; j < vsi->num_queue_pairs; j++) { + for (j = 0; j < I40E_MAX_NUM_QUEUES(netdev) ; j++) { tx_ring = READ_ONCE(vsi->tx_rings[j]); - if (!tx_ring) + if (!tx_ring) { + /* Bump the stat counter to skip these stats, and make + * sure the memory is zero'd + */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; continue; + } /* process Tx ring statistics */ do { @@ -1800,7 +1812,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, i40e_gstrings_misc_stats[i].stat_string); p += ETH_GSTRING_LEN; } - for (i = 0; i < vsi->num_queue_pairs; i++) { + for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) { snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i); -- cgit v1.2.3 From 019b9cd44d5aa457d9e45c85847f86ced40c852d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 17 May 2018 01:08:35 -0700 Subject: i40e: split i40e_get_strings() into smaller functions Split the statistic strings and private flags strings into their own separate functions to aid code readability. 
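After the split, i40e_get_strings() reduces to a thin dispatch over the requested string set; this is the shape the diff below arrives at (shown here schematically for readability):

  static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                               u8 *data)
  {
          switch (stringset) {
          case ETH_SS_TEST:
                  memcpy(data, i40e_gstrings_test,
                         I40E_TEST_LEN * ETH_GSTRING_LEN);
                  break;
          case ETH_SS_STATS:
                  i40e_get_stat_strings(netdev, data);
                  break;
          case ETH_SS_PRIV_FLAGS:
                  i40e_get_priv_flag_strings(netdev, data);
                  break;
          default:
                  break;
          }
  }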
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 183 ++++++++++++++----------- 1 file changed, 100 insertions(+), 83 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index bacb01b63727..c50ed2d391e1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1787,8 +1787,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, data[i++] = pf->stats.priority_xon_2_xoff[j]; } -static void i40e_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) +static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -1796,95 +1795,113 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, char *p = (char *)data; unsigned int i; + for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_net_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MISC_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_misc_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) { + snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); + p += ETH_GSTRING_LEN; + } + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + return; + + for (i = 0; i < I40E_VEB_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "veb.%s", + i40e_gstrings_veb_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_tx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_tx_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_rx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_rx_bytes", i); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "port.%s", + i40e_gstrings_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.tx_priority_%u_xon", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "port.tx_priority_%u_xoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.rx_priority_%u_xon", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "port.rx_priority_%u_xoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.rx_priority_%u_xon_2_xoff", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ +} + +static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + char *p = (char *)data; + unsigned int i; + + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } + if 
(pf->hw.pf_id != 0) + return; + for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gl_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } +} + +static void i40e_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ switch (stringset) { case ETH_SS_TEST: memcpy(data, i40e_gstrings_test, I40E_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: - for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gstrings_net_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MISC_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gstrings_misc_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) { - snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i); - p += ETH_GSTRING_LEN; - } - if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) - return; - - for (i = 0; i < I40E_VEB_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "veb.%s", - i40e_gstrings_veb_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_tx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_tx_bytes", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_rx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_rx_bytes", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "port.%s", - i40e_gstrings_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "port.tx_priority_%d_xon", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "port.tx_priority_%d_xoff", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%d_xon", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%d_xoff", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%d_xon_2_xoff", i); - p += ETH_GSTRING_LEN; - } - /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ + i40e_get_stat_strings(netdev, data); break; case ETH_SS_PRIV_FLAGS: - for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } - if (pf->hw.pf_id != 0) - break; - for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gl_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } + i40e_get_priv_flag_strings(netdev, data); break; default: break; -- cgit v1.2.3 From 9b10df596bd4d38f2a58cf87e0780510acc53d8d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 17 May 2018 01:08:36 -0700 Subject: i40e: use WARN_ONCE to replace the commented BUG_ON size check We don't really want to use BUG_ON here since that would completely crash the kernel, thus the reason we commented it out. 
We *can't* use BUILD_BUG_ON because at least now (a) the sizes aren't constant (we are fixing this) and (b) not all compilers are smart enough to understand that "p - data" is a constant. Instead, just use a WARN_ONCE so that the first time we end up with an incorrect size we will dump a stack trace and a message, hopefully highlighting the issues early in testing. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c50ed2d391e1..32bcb6a2a590 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1792,8 +1792,8 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - char *p = (char *)data; unsigned int i; + u8 *p = data; for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { snprintf(p, ETH_GSTRING_LEN, "%s", @@ -1864,7 +1864,9 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) "port.rx_priority_%u_xon_2_xoff", i); p += ETH_GSTRING_LEN; } - /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ + + WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, + "stat strings count mismatch!"); } static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) -- cgit v1.2.3 From bf1c39e64016c2c29991f20f29a91a76272b043a Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 17 May 2018 01:08:37 -0700 Subject: i40e: fold prefix strings directly into stat names We always prefix these stats with a fixed string, so just fold this prefix into the stat string definition. This preparatory work will make it easier to implement a helper function to copy stats and strings into the supplied buffers in a future patch. 
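With the prefixes folded into the stat definitions, the later helper can walk any i40e_stats table without per-table prefix handling. As a rough sketch only (the signature and name of the eventual upstream helper may differ; this mirrors the existing copy loops in i40e_get_ethtool_stats and is not taken from a real patch):

  /* Illustrative only: copy one i40e_stats table into the ethtool buffer,
   * advancing the caller's data pointer as it goes.
   */
  static void i40e_add_ethtool_stats(u64 **data, void *pointer,
                                     const struct i40e_stats stats[],
                                     const unsigned int size)
  {
          unsigned int i;
          char *p;

          for (i = 0; i < size; i++) {
                  p = (char *)pointer + stats[i].stat_offset;
                  *((*data)++) = (stats[i].sizeof_stat == sizeof(u64)) ?
                                 *(u64 *)p : *(u32 *)p;
          }
  }

  /* Hypothetical usage in i40e_get_ethtool_stats():
   *   i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats,
   *                          I40E_GLOBAL_STATS_LEN);
   */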
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 137 +++++++++++++------------ 1 file changed, 69 insertions(+), 68 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 32bcb6a2a590..6b34845d251c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -42,18 +42,18 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = { }; static const struct i40e_stats i40e_gstrings_veb_stats[] = { - I40E_VEB_STAT("rx_bytes", stats.rx_bytes), - I40E_VEB_STAT("tx_bytes", stats.tx_bytes), - I40E_VEB_STAT("rx_unicast", stats.rx_unicast), - I40E_VEB_STAT("tx_unicast", stats.tx_unicast), - I40E_VEB_STAT("rx_multicast", stats.rx_multicast), - I40E_VEB_STAT("tx_multicast", stats.tx_multicast), - I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast), - I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast), - I40E_VEB_STAT("rx_discards", stats.rx_discards), - I40E_VEB_STAT("tx_discards", stats.tx_discards), - I40E_VEB_STAT("tx_errors", stats.tx_errors), - I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol), + I40E_VEB_STAT("veb.rx_bytes", stats.rx_bytes), + I40E_VEB_STAT("veb.tx_bytes", stats.tx_bytes), + I40E_VEB_STAT("veb.rx_unicast", stats.rx_unicast), + I40E_VEB_STAT("veb.tx_unicast", stats.tx_unicast), + I40E_VEB_STAT("veb.rx_multicast", stats.rx_multicast), + I40E_VEB_STAT("veb.tx_multicast", stats.tx_multicast), + I40E_VEB_STAT("veb.rx_broadcast", stats.rx_broadcast), + I40E_VEB_STAT("veb.tx_broadcast", stats.tx_broadcast), + I40E_VEB_STAT("veb.rx_discards", stats.rx_discards), + I40E_VEB_STAT("veb.tx_discards", stats.tx_discards), + I40E_VEB_STAT("veb.tx_errors", stats.tx_errors), + I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol), }; static const struct i40e_stats i40e_gstrings_misc_stats[] = { @@ -82,62 +82,63 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { * is queried on the base PF netdev, not on the VMDq or FCoE netdev. 
*/ static const struct i40e_stats i40e_gstrings_stats[] = { - I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes), - I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes), - I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast), - I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast), - I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast), - I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast), - I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), - I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), - I40E_PF_STAT("tx_errors", stats.eth.tx_errors), - I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), - I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), - I40E_PF_STAT("rx_crc_errors", stats.crc_errors), - I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), - I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), - I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), - I40E_PF_STAT("tx_timeout", tx_timeout_count), - I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error), - I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), - I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), - I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), - I40E_PF_STAT("link_xon_tx", stats.link_xon_tx), - I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx), - I40E_PF_STAT("rx_size_64", stats.rx_size_64), - I40E_PF_STAT("rx_size_127", stats.rx_size_127), - I40E_PF_STAT("rx_size_255", stats.rx_size_255), - I40E_PF_STAT("rx_size_511", stats.rx_size_511), - I40E_PF_STAT("rx_size_1023", stats.rx_size_1023), - I40E_PF_STAT("rx_size_1522", stats.rx_size_1522), - I40E_PF_STAT("rx_size_big", stats.rx_size_big), - I40E_PF_STAT("tx_size_64", stats.tx_size_64), - I40E_PF_STAT("tx_size_127", stats.tx_size_127), - I40E_PF_STAT("tx_size_255", stats.tx_size_255), - I40E_PF_STAT("tx_size_511", stats.tx_size_511), - I40E_PF_STAT("tx_size_1023", stats.tx_size_1023), - I40E_PF_STAT("tx_size_1522", stats.tx_size_1522), - I40E_PF_STAT("tx_size_big", stats.tx_size_big), - I40E_PF_STAT("rx_undersize", stats.rx_undersize), - I40E_PF_STAT("rx_fragments", stats.rx_fragments), - I40E_PF_STAT("rx_oversize", stats.rx_oversize), - I40E_PF_STAT("rx_jabber", stats.rx_jabber), - I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), - I40E_PF_STAT("arq_overflows", arq_overflows), - I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), - I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), - I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), - I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), - I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), - I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status), - I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), - I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status), + I40E_PF_STAT("port.rx_bytes", stats.eth.rx_bytes), + I40E_PF_STAT("port.tx_bytes", stats.eth.tx_bytes), + I40E_PF_STAT("port.rx_unicast", stats.eth.rx_unicast), + I40E_PF_STAT("port.tx_unicast", stats.eth.tx_unicast), + I40E_PF_STAT("port.rx_multicast", stats.eth.rx_multicast), + I40E_PF_STAT("port.tx_multicast", stats.eth.tx_multicast), + I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast), + I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast), + I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors), + I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards), + I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down), + I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors), + I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes), + I40E_PF_STAT("port.mac_local_faults", stats.mac_local_faults), + 
I40E_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults), + I40E_PF_STAT("port.tx_timeout", tx_timeout_count), + I40E_PF_STAT("port.rx_csum_bad", hw_csum_rx_error), + I40E_PF_STAT("port.rx_length_errors", stats.rx_length_errors), + I40E_PF_STAT("port.link_xon_rx", stats.link_xon_rx), + I40E_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx), + I40E_PF_STAT("port.link_xon_tx", stats.link_xon_tx), + I40E_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx), + I40E_PF_STAT("port.rx_size_64", stats.rx_size_64), + I40E_PF_STAT("port.rx_size_127", stats.rx_size_127), + I40E_PF_STAT("port.rx_size_255", stats.rx_size_255), + I40E_PF_STAT("port.rx_size_511", stats.rx_size_511), + I40E_PF_STAT("port.rx_size_1023", stats.rx_size_1023), + I40E_PF_STAT("port.rx_size_1522", stats.rx_size_1522), + I40E_PF_STAT("port.rx_size_big", stats.rx_size_big), + I40E_PF_STAT("port.tx_size_64", stats.tx_size_64), + I40E_PF_STAT("port.tx_size_127", stats.tx_size_127), + I40E_PF_STAT("port.tx_size_255", stats.tx_size_255), + I40E_PF_STAT("port.tx_size_511", stats.tx_size_511), + I40E_PF_STAT("port.tx_size_1023", stats.tx_size_1023), + I40E_PF_STAT("port.tx_size_1522", stats.tx_size_1522), + I40E_PF_STAT("port.tx_size_big", stats.tx_size_big), + I40E_PF_STAT("port.rx_undersize", stats.rx_undersize), + I40E_PF_STAT("port.rx_fragments", stats.rx_fragments), + I40E_PF_STAT("port.rx_oversize", stats.rx_oversize), + I40E_PF_STAT("port.rx_jabber", stats.rx_jabber), + I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests), + I40E_PF_STAT("port.arq_overflows", arq_overflows), + I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared), + I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped), + I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt), + I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match), + I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), + I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status), + I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match), + I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status), /* LPI stats */ - I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), - I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), - I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count), - I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), + I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status), + I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status), + I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count), + I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count), }; /* We use num_tx_queues here as a proxy for the maximum number of queues @@ -1819,7 +1820,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) return; for (i = 0; i < I40E_VEB_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "veb.%s", + snprintf(p, ETH_GSTRING_LEN, "%s", i40e_gstrings_veb_stats[i].stat_string); p += ETH_GSTRING_LEN; } @@ -1839,7 +1840,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) } for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "port.%s", + snprintf(p, ETH_GSTRING_LEN, "%s", i40e_gstrings_stats[i].stat_string); p += ETH_GSTRING_LEN; } -- cgit v1.2.3 From e08696dcd988dbe5fe8e51753e23d16ee7e75f3e Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 17 May 2018 01:08:38 -0700 Subject: i40e: update data pointer directly when copying to the buffer A future patch is going to add a helper function i40e_add_ethtool_stats that will help lower the amount 
of boiler plate code in the i40e_get_ethtool_stats function. This conversion will take place over many patches, and the helper function will work by directly updating a reference to the data pointer. Since this would not work combined with the current method of accessing data like an array, update all the code that copies stats into the data buffer to use direct updates to the pointer instead of array accesses. This will prevent incorrect stat updates for patches in between the conversion. Similarly, when copying strings, we used a separate char *p pointer. Instead, use the data pointer directly as it's already a (u8 *) type which is the same size. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 117 ++++++++++++------------- 1 file changed, 58 insertions(+), 59 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 6b34845d251c..44a2803cb1ec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1699,7 +1699,6 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; unsigned int j; - int i = 0; char *p; struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); unsigned int start; @@ -1708,12 +1707,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) { p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset; - data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == + *(data++) = (i40e_gstrings_net_stats[j].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } for (j = 0; j < I40E_MISC_STATS_LEN; j++) { p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset; - data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat == + *(data++) = (i40e_gstrings_misc_stats[j].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } rcu_read_lock(); @@ -1724,29 +1723,29 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, /* Bump the stat counter to skip these stats, and make * sure the memory is zero'd */ - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; + *(data++) = 0; + *(data++) = 0; + *(data++) = 0; + *(data++) = 0; continue; } /* process Tx ring statistics */ do { start = u64_stats_fetch_begin_irq(&tx_ring->syncp); - data[i] = tx_ring->stats.packets; - data[i + 1] = tx_ring->stats.bytes; + data[0] = tx_ring->stats.packets; + data[1] = tx_ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); - i += 2; + data += 2; /* Rx ring is the 2nd half of the queue pair */ rx_ring = &tx_ring[1]; do { start = u64_stats_fetch_begin_irq(&rx_ring->syncp); - data[i] = rx_ring->stats.packets; - data[i + 1] = rx_ring->stats.bytes; + data[0] = rx_ring->stats.packets; + data[1] = rx_ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); - i += 2; + data += 2; } rcu_read_unlock(); if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) @@ -1759,33 +1758,33 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, for (j = 0; j < I40E_VEB_STATS_LEN; j++) { p = (char *)veb; p += i40e_gstrings_veb_stats[j].stat_offset; - data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == + *(data++) = (i40e_gstrings_veb_stats[j].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) { - data[i++] = veb->tc_stats.tc_tx_packets[j]; - data[i++] = veb->tc_stats.tc_tx_bytes[j]; - data[i++] = veb->tc_stats.tc_rx_packets[j]; - data[i++] = veb->tc_stats.tc_rx_bytes[j]; + *(data++) = veb->tc_stats.tc_tx_packets[j]; + *(data++) = veb->tc_stats.tc_tx_bytes[j]; + *(data++) = veb->tc_stats.tc_rx_packets[j]; + *(data++) = veb->tc_stats.tc_rx_bytes[j]; } } else { - i += I40E_VEB_STATS_TOTAL; + data += I40E_VEB_STATS_TOTAL; } for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { p = (char *)pf + i40e_gstrings_stats[j].stat_offset; - data[i++] = (i40e_gstrings_stats[j].sizeof_stat == + *(data++) = (i40e_gstrings_stats[j].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { - data[i++] = pf->stats.priority_xon_tx[j]; - data[i++] = pf->stats.priority_xoff_tx[j]; + *(data++) = pf->stats.priority_xon_tx[j]; + *(data++) = pf->stats.priority_xoff_tx[j]; } for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { - data[i++] = pf->stats.priority_xon_rx[j]; - data[i++] = pf->stats.priority_xoff_rx[j]; + *(data++) = pf->stats.priority_xon_rx[j]; + *(data++) = pf->stats.priority_xoff_rx[j]; } for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) - data[i++] = pf->stats.priority_xon_2_xoff[j]; + *(data++) = pf->stats.priority_xon_2_xoff[j]; } static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) @@ -1797,73 +1796,73 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) u8 *p = data; for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", + snprintf(data, ETH_GSTRING_LEN, "%s", i40e_gstrings_net_stats[i].stat_string); - p += ETH_GSTRING_LEN; + data += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MISC_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", + snprintf(data, ETH_GSTRING_LEN, "%s", i40e_gstrings_misc_stats[i].stat_string); - p += ETH_GSTRING_LEN; + data += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) { - snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); - p += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); + data += ETH_GSTRING_LEN; } if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; for (i = 0; i < I40E_VEB_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", + snprintf(data, ETH_GSTRING_LEN, "%s", i40e_gstrings_veb_stats[i].stat_string); - p += ETH_GSTRING_LEN; + data += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - snprintf(p, ETH_GSTRING_LEN, + snprintf(data, ETH_GSTRING_LEN, "veb.tc_%u_tx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "veb.tc_%u_tx_bytes", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "veb.tc_%u_rx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "veb.tc_%u_rx_bytes", i); - p += ETH_GSTRING_LEN; + data += 
ETH_GSTRING_LEN; } for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", + snprintf(data, ETH_GSTRING_LEN, "%s", i40e_gstrings_stats[i].stat_string); - p += ETH_GSTRING_LEN; + data += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, + snprintf(data, ETH_GSTRING_LEN, "port.tx_priority_%u_xon", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "port.tx_priority_%u_xoff", i); - p += ETH_GSTRING_LEN; + data += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, + snprintf(data, ETH_GSTRING_LEN, "port.rx_priority_%u_xon", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "port.rx_priority_%u_xoff", i); - p += ETH_GSTRING_LEN; + data += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, + snprintf(data, ETH_GSTRING_LEN, "port.rx_priority_%u_xon_2_xoff", i); - p += ETH_GSTRING_LEN; + data += ETH_GSTRING_LEN; } WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, -- cgit v1.2.3 From ec29bbf8abaca7b295abd4a4204461ca9f82c3f5 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 17 May 2018 01:08:39 -0700 Subject: i40e: add function doc headers for ethtool stats functions Add documentation for the i40e_get_stats_count, i40e_get_stat_strings and i40e_get_ethtool_stats explaining that the number and ordering of statistics must remain constant for a given netdevice. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 38 ++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 44a2803cb1ec..4b5ecba0148c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1660,6 +1660,20 @@ done: return err; } +/** + * i40e_get_stats_count - return the stats count for a device + * @netdev: the netdev to return the count for + * + * Returns the total number of statistics for this netdev. Note that even + * though this is a function, it is required that the count for a specific + * netdev must never change. Basing the count on static values such as the + * maximum number of queues or the device type is ok. However, the API for + * obtaining stats is *not* safe against changes based on non-static + * values such as the *current* number of queues, or runtime flags. + * + * If a statistic is not always enabled, return it as part of the count + * anyways, always return its string, and report its value as zero. + **/ static int i40e_get_stats_count(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -1691,6 +1705,20 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } } +/** + * i40e_get_ethtool_stats - copy stat values into supplied buffer + * @netdev: the netdev to collect stats for + * @stats: ethtool stats command structure + * @data: ethtool supplied buffer + * + * Copy the stats values for this netdev into the buffer. Expects data to be + * pre-allocated to the size returned by i40e_get_stats_count.. Note that all + * statistics must be copied in a static order, and the count must not change + * for a given netdev. See i40e_get_stats_count for more details. 
+ * + * If a statistic is not currently valid (such as a disabled queue), this + * function reports its value as zero. + **/ static void i40e_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { @@ -1787,6 +1815,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, *(data++) = pf->stats.priority_xon_2_xoff[j]; } +/** + * i40e_get_stat_strings - copy stat strings into supplied buffer + * @netdev: the netdev to collect strings for + * @data: supplied buffer to copy strings into + * + * Copy the strings related to stats for this netdev. Expects data to be + * pre-allocated with the size reported by i40e_get_stats_count. Note that the + * strings must be copied in a static order and the total count must not + * change for a given netdev. See i40e_get_stats_count for more details. + **/ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); -- cgit v1.2.3 From 3f76bdb437c6a9a4621ee64f1c877b7532efa7d3 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 17 May 2018 01:08:40 -0700 Subject: i40e: use the more traditional 'i' loop variable Since we no longer use i as an array index for the data variable, replace the use of 'j' with 'i' so that we match the general loop variable name. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 56 +++++++++++++------------- 1 file changed, 28 insertions(+), 28 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4b5ecba0148c..6947a2a571cb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1726,26 +1726,26 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - unsigned int j; + unsigned int i; char *p; struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); unsigned int start; i40e_update_stats(vsi); - for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) { - p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset; - *(data++) = (i40e_gstrings_net_stats[j].sizeof_stat == + for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + i40e_gstrings_net_stats[i].stat_offset; + *(data++) = (i40e_gstrings_net_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < I40E_MISC_STATS_LEN; j++) { - p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset; - *(data++) = (i40e_gstrings_misc_stats[j].sizeof_stat == + for (i = 0; i < I40E_MISC_STATS_LEN; i++) { + p = (char *)vsi + i40e_gstrings_misc_stats[i].stat_offset; + *(data++) = (i40e_gstrings_misc_stats[i].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } rcu_read_lock(); - for (j = 0; j < I40E_MAX_NUM_QUEUES(netdev) ; j++) { - tx_ring = READ_ONCE(vsi->tx_rings[j]); + for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev) ; i++) { + tx_ring = READ_ONCE(vsi->tx_rings[i]); if (!tx_ring) { /* Bump the stat counter to skip these stats, and make @@ -1783,36 +1783,36 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { struct i40e_veb *veb = pf->veb[pf->lan_veb]; - for (j = 0; j < I40E_VEB_STATS_LEN; j++) { + for (i = 0; i < I40E_VEB_STATS_LEN; i++) { p = (char *)veb; - p += i40e_gstrings_veb_stats[j].stat_offset; - *(data++) = (i40e_gstrings_veb_stats[j].sizeof_stat == + p += i40e_gstrings_veb_stats[i].stat_offset; + *(data++) = (i40e_gstrings_veb_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) { - *(data++) = veb->tc_stats.tc_tx_packets[j]; - *(data++) = veb->tc_stats.tc_tx_bytes[j]; - *(data++) = veb->tc_stats.tc_rx_packets[j]; - *(data++) = veb->tc_stats.tc_rx_bytes[j]; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + *(data++) = veb->tc_stats.tc_tx_packets[i]; + *(data++) = veb->tc_stats.tc_tx_bytes[i]; + *(data++) = veb->tc_stats.tc_rx_packets[i]; + *(data++) = veb->tc_stats.tc_rx_bytes[i]; } } else { data += I40E_VEB_STATS_TOTAL; } - for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { - p = (char *)pf + i40e_gstrings_stats[j].stat_offset; - *(data++) = (i40e_gstrings_stats[j].sizeof_stat == + for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { + p = (char *)pf + i40e_gstrings_stats[i].stat_offset; + *(data++) = (i40e_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { - *(data++) = pf->stats.priority_xon_tx[j]; - *(data++) = pf->stats.priority_xoff_tx[j]; + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + *(data++) = pf->stats.priority_xon_tx[i]; + *(data++) = pf->stats.priority_xoff_tx[i]; } - for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { - *(data++) = pf->stats.priority_xon_rx[j]; - *(data++) = pf->stats.priority_xoff_rx[j]; + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + *(data++) = pf->stats.priority_xon_rx[i]; + *(data++) = pf->stats.priority_xoff_rx[i]; } - for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) - *(data++) = pf->stats.priority_xon_2_xoff[j]; + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + *(data++) = pf->stats.priority_xon_2_xoff[i]; } /** -- cgit v1.2.3 From 5a15a1b07c51dffcb4c4b6be0f8af1bb964162e0 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 21 May 2018 10:26:52 -0700 Subject: mlxsw: spectrum_router: Add support for route append Handle append for gateway based routes. Dev-only multipath routes will be handled by a follow on patch. Signed-off-by: Ido Schimmel Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 8028d221aece..77b2adb29341 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5725,6 +5725,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; err = mlxsw_sp_router_fib6_add(mlxsw_sp, @@ -5831,6 +5832,7 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: fen6_info = container_of(info, struct fib6_entry_notifier_info, -- cgit v1.2.3 From dd006921d67f4a96f3d1fa763aad4d5dcd86959b Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:37 -0700 Subject: qed: Add MFW interfaces for TLV request support. The patch adds required management firmware (MFW) interfaces such as mailbox commands, TLV types etc. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 231 ++++++++++++++++++++++++++++++ 1 file changed, 231 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b5f70eff0182..8e1e6e1eb40e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11863,6 +11863,8 @@ struct public_global { u32 running_bundle_id; s32 external_temperature; u32 mdump_reason; + u32 data_ptr; + u32 data_size; }; struct fw_flr_mb { @@ -12322,6 +12324,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000 #define DRV_MSG_CODE_RESOURCE_CMD 0x00230000 +#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 #define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F #define RESOURCE_CMD_REQ_RESC_SHIFT 0 @@ -12523,6 +12526,7 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_BW_UPDATE11, MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_GET_TLV_REQ, MFW_DRV_MSG_MAX }; @@ -12558,6 +12562,233 @@ struct mcp_public_data { struct public_func func[MCP_GLOB_FUNC_MAX]; }; +/* OCBB definitions */ +enum tlvs { + /* Category 1: Device Properties */ + DRV_TLV_CLP_STR, + DRV_TLV_CLP_STR_CTD, + /* Category 6: Device Configuration */ + DRV_TLV_SCSI_TO, + DRV_TLV_R_T_TOV, + DRV_TLV_R_A_TOV, + DRV_TLV_E_D_TOV, + DRV_TLV_CR_TOV, + DRV_TLV_BOOT_TYPE, + /* Category 8: Port Configuration */ + DRV_TLV_NPIV_ENABLED, + /* Category 10: Function Configuration */ + DRV_TLV_FEATURE_FLAGS, + DRV_TLV_LOCAL_ADMIN_ADDR, + DRV_TLV_ADDITIONAL_MAC_ADDR_1, + DRV_TLV_ADDITIONAL_MAC_ADDR_2, + DRV_TLV_LSO_MAX_OFFLOAD_SIZE, + DRV_TLV_LSO_MIN_SEGMENT_COUNT, + DRV_TLV_PROMISCUOUS_MODE, + DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, + DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, + DRV_TLV_OS_DRIVER_STATES, + DRV_TLV_PXE_BOOT_PROGRESS, + /* Category 12: FC/FCoE Configuration */ + DRV_TLV_NPIV_STATE, + DRV_TLV_NUM_OF_NPIV_IDS, + 
DRV_TLV_SWITCH_NAME, + DRV_TLV_SWITCH_PORT_NUM, + DRV_TLV_SWITCH_PORT_ID, + DRV_TLV_VENDOR_NAME, + DRV_TLV_SWITCH_MODEL, + DRV_TLV_SWITCH_FW_VER, + DRV_TLV_QOS_PRIORITY_PER_802_1P, + DRV_TLV_PORT_ALIAS, + DRV_TLV_PORT_STATE, + DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_LINK_FAILURE_COUNT, + DRV_TLV_FCOE_BOOT_PROGRESS, + /* Category 13: iSCSI Configuration */ + DRV_TLV_TARGET_LLMNR_ENABLED, + DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, + DRV_TLV_DATA_DIGEST_FLAG_ENABLED, + DRV_TLV_AUTHENTICATION_METHOD, + DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, + DRV_TLV_MAX_FRAME_SIZE, + DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_ISCSI_BOOT_PROGRESS, + /* Category 20: Device Data */ + DRV_TLV_PCIE_BUS_RX_UTILIZATION, + DRV_TLV_PCIE_BUS_TX_UTILIZATION, + DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, + DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, + DRV_TLV_NCSI_RX_BYTES_RECEIVED, + DRV_TLV_NCSI_TX_BYTES_SENT, + /* Category 22: Base Port Data */ + DRV_TLV_RX_DISCARDS, + DRV_TLV_RX_ERRORS, + DRV_TLV_TX_ERRORS, + DRV_TLV_TX_DISCARDS, + DRV_TLV_RX_FRAMES_RECEIVED, + DRV_TLV_TX_FRAMES_SENT, + /* Category 23: FC/FCoE Port Data */ + DRV_TLV_RX_BROADCAST_PACKETS, + DRV_TLV_TX_BROADCAST_PACKETS, + /* Category 28: Base Function Data */ + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, + DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_PF_RX_FRAMES_RECEIVED, + DRV_TLV_RX_BYTES_RECEIVED, + DRV_TLV_PF_TX_FRAMES_SENT, + DRV_TLV_TX_BYTES_SENT, + DRV_TLV_IOV_OFFLOAD, + DRV_TLV_PCI_ERRORS_CAP_ID, + DRV_TLV_UNCORRECTABLE_ERROR_STATUS, + DRV_TLV_UNCORRECTABLE_ERROR_MASK, + DRV_TLV_CORRECTABLE_ERROR_STATUS, + DRV_TLV_CORRECTABLE_ERROR_MASK, + DRV_TLV_PCI_ERRORS_AECC_REGISTER, + DRV_TLV_TX_QUEUES_EMPTY, + DRV_TLV_RX_QUEUES_EMPTY, + DRV_TLV_TX_QUEUES_FULL, + DRV_TLV_RX_QUEUES_FULL, + /* Category 29: FC/FCoE Function Data */ + DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_FRAMES_RECEIVED, + DRV_TLV_FCOE_RX_BYTES_RECEIVED, + DRV_TLV_FCOE_TX_FRAMES_SENT, + DRV_TLV_FCOE_TX_BYTES_SENT, + DRV_TLV_CRC_ERROR_COUNT, + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_1_TIMESTAMP, + DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_2_TIMESTAMP, + DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_3_TIMESTAMP, + DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_4_TIMESTAMP, + DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_5_TIMESTAMP, + DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, + DRV_TLV_LOSS_OF_SIGNAL_ERRORS, + DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, + DRV_TLV_DISPARITY_ERROR_COUNT, + DRV_TLV_CODE_VIOLATION_ERROR_COUNT, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_TIMESTAMP, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, + DRV_TLV_LAST_FLOGI_RJT, + DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, + DRV_TLV_FDISCS_SENT_COUNT, + DRV_TLV_FDISC_ACCS_RECEIVED, + DRV_TLV_FDISC_RJTS_RECEIVED, + DRV_TLV_PLOGI_SENT_COUNT, + DRV_TLV_PLOGI_ACCS_RECEIVED, + DRV_TLV_PLOGI_RJTS_RECEIVED, + 
DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_1_TIMESTAMP, + DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_2_TIMESTAMP, + DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_3_TIMESTAMP, + DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_4_TIMESTAMP, + DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_5_TIMESTAMP, + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_1_ACC_TIMESTAMP, + DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_2_ACC_TIMESTAMP, + DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_3_ACC_TIMESTAMP, + DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_4_ACC_TIMESTAMP, + DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_5_ACC_TIMESTAMP, + DRV_TLV_LOGOS_ISSUED, + DRV_TLV_LOGO_ACCS_RECEIVED, + DRV_TLV_LOGO_RJTS_RECEIVED, + DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_1_TIMESTAMP, + DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_2_TIMESTAMP, + DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_3_TIMESTAMP, + DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_4_TIMESTAMP, + DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_5_TIMESTAMP, + DRV_TLV_LOGOS_RECEIVED, + DRV_TLV_ACCS_ISSUED, + DRV_TLV_PRLIS_ISSUED, + DRV_TLV_ACCS_RECEIVED, + DRV_TLV_ABTS_SENT_COUNT, + DRV_TLV_ABTS_ACCS_RECEIVED, + DRV_TLV_ABTS_RJTS_RECEIVED, + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_1_TIMESTAMP, + DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_2_TIMESTAMP, + DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_3_TIMESTAMP, + DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_4_TIMESTAMP, + DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_5_TIMESTAMP, + DRV_TLV_RSCNS_RECEIVED, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, + DRV_TLV_LUN_RESETS_ISSUED, + DRV_TLV_ABORT_TASK_SETS_ISSUED, + DRV_TLV_TPRLOS_SENT, + DRV_TLV_NOS_SENT_COUNT, + DRV_TLV_NOS_RECEIVED_COUNT, + DRV_TLV_OLS_COUNT, + DRV_TLV_LR_COUNT, + DRV_TLV_LRR_COUNT, + DRV_TLV_LIP_SENT_COUNT, + DRV_TLV_LIP_RECEIVED_COUNT, + DRV_TLV_EOFA_COUNT, + DRV_TLV_EOFNI_COUNT, + DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, + DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_BUSY_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, + DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, + DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, + DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_1_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_2_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_3_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_4_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_5_TIMESTAMP, + /* Category 30: iSCSI Function Data */ + DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, + DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, + DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, + DRV_TLV_ISCSI_PDU_TX_BYTES_SENT +}; + struct nvm_cfg_mac_address { u32 mac_addr_hi; #define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF -- cgit v1.2.3 From 2528c389936efbbece25088426fe7c3c91ff355f Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:38 
-0700 Subject: qed: Add support for tlv request processing. The patch adds driver support for processing TLV requests/repsonses from the mfw and upper driver layers respectively. The implementation reads the requested TLVs from the shared memory, requests the values from upper layer drivers, populates this info (TLVs) shared memory and notifies MFW about the TLV values. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 2 +- drivers/net/ethernet/qlogic/qed/qed.h | 5 + drivers/net/ethernet/qlogic/qed/qed_main.c | 6 + drivers/net/ethernet/qlogic/qed/qed_mcp.h | 53 ++++ drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 378 ++++++++++++++++++++++++++ include/linux/qed/qed_if.h | 37 +++ 6 files changed, 480 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index c70cf2ad81c0..a0acb94d65f0 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ - qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o + qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index adcff495466e..dfdbe529e2bf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -92,6 +92,8 @@ struct qed_eth_cb_ops; struct qed_dev_info; union qed_mcp_protocol_stats; enum qed_mcp_protocol_type; +enum qed_mfw_tlv_type; +union qed_mfw_tlv_data; /* helpers */ #define QED_MFW_GET_FIELD(name, field) \ @@ -907,4 +909,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev, int qed_slowpath_irq_req(struct qed_hwfn *hwfn); void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn); +int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, + enum qed_mfw_tlv_type type, + union qed_mfw_tlv_data *tlv_data); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9feed3b79cd4..cbf0ea97fc8e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -2088,3 +2088,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev, return; } } + +int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, + union qed_mfw_tlv_data *tlv_buf) +{ + return -EINVAL; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 250579ba632b..591877fca67a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -213,6 +213,40 @@ enum qed_ov_wol { QED_OV_WOL_ENABLED }; +enum qed_mfw_tlv_type { + QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */ + QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */ + QED_MFW_TLV_MAX = 0x4, +}; + +struct qed_mfw_tlv_generic { +#define QED_MFW_TLV_FLAGS_SIZE 2 + struct { + u8 ipv4_csum_offload; + u8 lso_supported; + bool b_set; + } flags; + +#define QED_MFW_TLV_MAC_COUNT 3 + /* First entry for primary MAC, 2 secondary MACs possible */ + u8 mac[QED_MFW_TLV_MAC_COUNT][6]; + bool 
mac_set[QED_MFW_TLV_MAC_COUNT]; + + u64 rx_frames; + bool rx_frames_set; + u64 rx_bytes; + bool rx_bytes_set; + u64 tx_frames; + bool tx_frames_set; + u64 tx_bytes; + bool tx_bytes_set; +}; + +union qed_mfw_tlv_data { + struct qed_mfw_tlv_generic generic; + struct qed_mfw_tlv_eth eth; +}; + /** * @brief - returns the link params of the hw function * @@ -561,6 +595,17 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, struct bist_nvm_image_att *p_image_att, u32 image_index); +/** + * @brief - Processes the TLV request from MFW i.e., get the required TLV info + * from the qed client and send it to the MFW. + * + * @param p_hwfn + * @param p_ptt + * + * @param return 0 upon success. + */ +int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields @@ -621,6 +666,14 @@ struct qed_mcp_mb_params { u32 mcp_param; }; +struct qed_drv_tlv_hdr { + u8 tlv_type; + u8 tlv_length; /* In dwords - not including this header */ + u8 tlv_reserved; +#define QED_DRV_TLV_FLAGS_CHANGED 0x01 + u8 tlv_flags; +}; + /** * @brief Initialize the interface with the MCP * diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c new file mode 100644 index 000000000000..d58a7146cce5 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include "qed.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" + +#define TLV_TYPE(p) (p[0]) +#define TLV_LENGTH(p) (p[1]) +#define TLV_FLAGS(p) (p[3]) + +#define QED_TLV_DATA_MAX (14) +struct qed_tlv_parsed_buf { + /* To be filled with the address to set in Value field */ + void *p_val; + + /* To be used internally in case the value has to be modified */ + u8 data[QED_TLV_DATA_MAX]; +}; + +static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) +{ + switch (tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + case DRV_TLV_LOCAL_ADMIN_ADDR: + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + case DRV_TLV_OS_DRIVER_STATES: + case DRV_TLV_PXE_BOOT_PROGRESS: + case DRV_TLV_RX_FRAMES_RECEIVED: + case DRV_TLV_RX_BYTES_RECEIVED: + case DRV_TLV_TX_FRAMES_SENT: + case DRV_TLV_TX_BYTES_SENT: + case DRV_TLV_NPIV_ENABLED: + case DRV_TLV_PCIE_BUS_RX_UTILIZATION: + case DRV_TLV_PCIE_BUS_TX_UTILIZATION: + case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION: + case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED: + case DRV_TLV_NCSI_RX_BYTES_RECEIVED: + case DRV_TLV_NCSI_TX_BYTES_SENT: + *tlv_group |= QED_MFW_TLV_GENERIC; + break; + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + case DRV_TLV_PROMISCUOUS_MODE: + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_IOV_OFFLOAD: + case DRV_TLV_TX_QUEUES_EMPTY: + case DRV_TLV_RX_QUEUES_EMPTY: + case DRV_TLV_TX_QUEUES_FULL: + case DRV_TLV_RX_QUEUES_FULL: + *tlv_group |= QED_MFW_TLV_ETH; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* Returns size of the data buffer or, -1 in case TLV data is not available. 
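Throughout these TLV structures, every reported value is paired with a companion *_set flag, and the per-group getters in this file only hand a value back (returning its size) when that flag is true; -1 means the client never filled the field. A self-contained sketch of that convention, with an illustrative struct and helpers that are not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative model of the value/<name>_set pairing used by the
 * qed_mfw_tlv_* structures: a field is only reported when its _set
 * flag is true, otherwise the getter answers -1 ("not available").
 * Struct and helpers are invented for the demo.
 */
struct demo_tlv_generic {
	uint64_t rx_frames;
	bool rx_frames_set;
	uint64_t tx_frames;
	bool tx_frames_set;
};

static void demo_fill(struct demo_tlv_generic *t)
{
	memset(t, 0, sizeof(*t));
	/* Only rx_frames is known in this example, so only it is marked. */
	t->rx_frames = 1000;
	t->rx_frames_set = true;
}

static int demo_get_rx_frames(const struct demo_tlv_generic *t, const void **val)
{
	if (!t->rx_frames_set)
		return -1;
	*val = &t->rx_frames;
	return (int)sizeof(t->rx_frames);
}

static int demo_get_tx_frames(const struct demo_tlv_generic *t, const void **val)
{
	if (!t->tx_frames_set)
		return -1;
	*val = &t->tx_frames;
	return (int)sizeof(t->tx_frames);
}

int main(void)
{
	struct demo_tlv_generic t;
	const void *val;

	demo_fill(&t);
	printf("rx_frames: %d bytes\n", demo_get_rx_frames(&t, &val));
	printf("tx_frames: %d bytes (expect -1)\n", demo_get_tx_frames(&t, &val));
	return 0;
}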
*/ +static int +qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_generic *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + if (p_drv_buf->flags.b_set) { + memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); + p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ? + 1 : 0; + p_buf->data[0] |= (p_drv_buf->flags.lso_supported ? + 1 : 0) << 1; + p_buf->p_val = p_buf->data; + return QED_MFW_TLV_FLAGS_SIZE; + } + break; + + case DRV_TLV_LOCAL_ADMIN_ADDR: + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + { + int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR; + + if (p_drv_buf->mac_set[idx]) { + p_buf->p_val = p_drv_buf->mac[idx]; + return ETH_ALEN; + } + break; + } + + case DRV_TLV_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + p_buf->p_val = &p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + p_buf->p_val = &p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + p_buf->p_val = &p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + p_buf->p_val = &p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + +static int +qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_eth *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + if (p_drv_buf->lso_maxoff_size_set) { + p_buf->p_val = &p_drv_buf->lso_maxoff_size; + return sizeof(p_drv_buf->lso_maxoff_size); + } + break; + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + if (p_drv_buf->lso_minseg_size_set) { + p_buf->p_val = &p_drv_buf->lso_minseg_size; + return sizeof(p_drv_buf->lso_minseg_size); + } + break; + case DRV_TLV_PROMISCUOUS_MODE: + if (p_drv_buf->prom_mode_set) { + p_buf->p_val = &p_drv_buf->prom_mode; + return sizeof(p_drv_buf->prom_mode); + } + break; + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_descr_size_set) { + p_buf->p_val = &p_drv_buf->tx_descr_size; + return sizeof(p_drv_buf->tx_descr_size); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_descr_size_set) { + p_buf->p_val = &p_drv_buf->rx_descr_size; + return sizeof(p_drv_buf->rx_descr_size); + } + break; + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + if (p_drv_buf->netq_count_set) { + p_buf->p_val = &p_drv_buf->netq_count; + return sizeof(p_drv_buf->netq_count); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + if (p_drv_buf->tcp4_offloads_set) { + p_buf->p_val = &p_drv_buf->tcp4_offloads; + return sizeof(p_drv_buf->tcp4_offloads); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + if (p_drv_buf->tcp6_offloads_set) { + p_buf->p_val = &p_drv_buf->tcp6_offloads; + return sizeof(p_drv_buf->tcp6_offloads); + } + break; + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_descr_qdepth_set) { + p_buf->p_val = &p_drv_buf->tx_descr_qdepth; + return sizeof(p_drv_buf->tx_descr_qdepth); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_descr_qdepth_set) { + p_buf->p_val = &p_drv_buf->rx_descr_qdepth; + return sizeof(p_drv_buf->rx_descr_qdepth); + } + break; + case DRV_TLV_IOV_OFFLOAD: + if (p_drv_buf->iov_offload_set) { + p_buf->p_val = 
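The DRV_TLV_FEATURE_FLAGS handler above packs two booleans into the low bits of a QED_MFW_TLV_FLAGS_SIZE (2 byte) value: bit 0 reports IPv4 checksum offload and bit 1 reports LSO support. A tiny standalone sketch of that encoding (helper names are made up):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Sketch of the DRV_TLV_FEATURE_FLAGS encoding: bit 0 of the first byte
 * carries IPv4 checksum offload, bit 1 carries LSO support, and the rest
 * of the two-byte value stays zero.  Helper name is illustrative.
 */
static void pack_feature_flags(uint8_t *buf, bool ipv4_csum, bool lso)
{
	buf[0] = (ipv4_csum ? 1 : 0) | ((lso ? 1 : 0) << 1);
	buf[1] = 0;
}

int main(void)
{
	uint8_t flags[2];

	pack_feature_flags(flags, true, true);
	printf("feature flags byte: 0x%02x (ipv4_csum=%d, lso=%d)\n",
	       flags[0], flags[0] & 1, (flags[0] >> 1) & 1);
	return 0;
}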
&p_drv_buf->iov_offload; + return sizeof(p_drv_buf->iov_offload); + } + break; + case DRV_TLV_TX_QUEUES_EMPTY: + if (p_drv_buf->txqs_empty_set) { + p_buf->p_val = &p_drv_buf->txqs_empty; + return sizeof(p_drv_buf->txqs_empty); + } + break; + case DRV_TLV_RX_QUEUES_EMPTY: + if (p_drv_buf->rxqs_empty_set) { + p_buf->p_val = &p_drv_buf->rxqs_empty; + return sizeof(p_drv_buf->rxqs_empty); + } + break; + case DRV_TLV_TX_QUEUES_FULL: + if (p_drv_buf->num_txqs_full_set) { + p_buf->p_val = &p_drv_buf->num_txqs_full; + return sizeof(p_drv_buf->num_txqs_full); + } + break; + case DRV_TLV_RX_QUEUES_FULL: + if (p_drv_buf->num_rxqs_full_set) { + p_buf->p_val = &p_drv_buf->num_rxqs_full; + return sizeof(p_drv_buf->num_rxqs_full); + } + break; + default: + break; + } + + return -1; +} + +static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, + u8 tlv_group, u8 *p_mfw_buf, u32 size) +{ + union qed_mfw_tlv_data *p_tlv_data; + struct qed_tlv_parsed_buf buffer; + struct qed_drv_tlv_hdr tlv; + int len = 0; + u32 offset; + u8 *p_tlv; + + p_tlv_data = vzalloc(sizeof(*p_tlv_data)); + if (!p_tlv_data) + return -ENOMEM; + + if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) { + vfree(p_tlv_data); + return -EINVAL; + } + + memset(&tlv, 0, sizeof(tlv)); + for (offset = 0; offset < size; + offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { + p_tlv = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_tlv); + tlv.tlv_length = TLV_LENGTH(p_tlv); + tlv.tlv_flags = TLV_FLAGS(p_tlv); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Type %d length = %d flags = 0x%x\n", tlv.tlv_type, + tlv.tlv_length, tlv.tlv_flags); + + if (tlv_group == QED_MFW_TLV_GENERIC) + len = qed_mfw_get_gen_tlv_value(&tlv, + &p_tlv_data->generic, + &buffer); + else if (tlv_group == QED_MFW_TLV_ETH) + len = qed_mfw_get_eth_tlv_value(&tlv, + &p_tlv_data->eth, + &buffer); + + if (len > 0) { + WARN(len > 4 * tlv.tlv_length, + "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n", + len, 4 * tlv.tlv_length); + len = min_t(int, len, 4 * tlv.tlv_length); + tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED; + TLV_FLAGS(p_tlv) = tlv.tlv_flags; + memcpy(p_mfw_buf + offset + sizeof(tlv), + buffer.p_val, len); + } + } + + vfree(p_tlv_data); + + return 0; +} + +int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 addr, size, offset, resp, param, val, global_offsize, global_addr; + u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp; + struct qed_drv_tlv_hdr tlv; + int rc; + + addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = qed_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = global_addr + offsetof(struct public_global, data_ptr); + addr = qed_rd(p_hwfn, p_ptt, addr); + size = qed_rd(p_hwfn, p_ptt, global_addr + + offsetof(struct public_global, data_size)); + + if (!size) { + DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size); + goto drv_done; + } + + p_mfw_buf = vzalloc(size); + if (!p_mfw_buf) { + DP_NOTICE(p_hwfn, "Failed allocate memory for p_mfw_buf\n"); + goto drv_done; + } + + /* Read the TLV request to local buffer. MFW represents the TLV in + * little endian format and mcp returns it bigendian format. Hence + * driver need to convert data to little endian first and then do the + * memcpy (casting) to preserve the MFW TLV format in the driver buffer. 
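The walk in qed_mfw_update_tlvs() above depends on tlv_length being counted in dwords and excluding the 4-byte header, so the next header always sits at offset + sizeof(hdr) + 4 * length, and any TLV the driver actually fills is also marked with QED_DRV_TLV_FLAGS_CHANGED in the flags byte. A self-contained sketch of that walk, with made-up buffer contents:

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of the TLV walk: each TLV starts with a 4-byte header
 * (type, length-in-dwords, reserved, flags) and the value occupies
 * length * 4 bytes, so the next header lives at offset + 4 + 4 * length.
 * Buffer contents are invented for the demo.
 */
#define TLV_HDR_SIZE	4
#define TLV_FLAG_CHANGED 0x01

static void walk_tlvs(uint8_t *buf, size_t size)
{
	size_t off = 0;

	while (off + TLV_HDR_SIZE <= size) {
		uint8_t type = buf[off];
		uint8_t len  = buf[off + 1];	/* in dwords, header excluded */

		printf("TLV type %u, value %u bytes at offset %zu\n",
		       (unsigned)type, 4u * len, off + TLV_HDR_SIZE);

		/* A driver that fills the value would also mark it changed. */
		buf[off + 3] |= TLV_FLAG_CHANGED;

		off += TLV_HDR_SIZE + 4u * len;
	}
}

int main(void)
{
	/* Two TLVs: type 1 with a 1-dword value, type 2 with a 2-dword value. */
	uint8_t buf[] = {
		1, 1, 0, 0,  0xaa, 0xbb, 0xcc, 0xdd,
		2, 2, 0, 0,  0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
	};

	walk_tlvs(buf, sizeof(buf));
	return 0;
}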
+ * + */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + val = qed_rd(p_hwfn, p_ptt, addr + offset); + val = be32_to_cpu(val); + memcpy(&p_mfw_buf[offset], &val, sizeof(u32)); + } + + /* Parse the headers to enumerate the requested TLV groups */ + for (offset = 0; offset < size; + offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { + p_temp = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_temp); + tlv.tlv_length = TLV_LENGTH(p_temp); + if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group)) + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, + "Un recognized TLV %d\n", tlv.tlv_type); + } + + /* Sanitize the TLV groups according to personality */ + if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping L2 TLVs for non-L2 function\n"); + tlv_group &= ~QED_MFW_TLV_ETH; + } + + /* Update the TLV values in the local buffer */ + for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) { + if (tlv_group & id) + if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size)) + goto drv_done; + } + + /* Write the TLV data to shared memory. The stream of 4 bytes first need + * to be mem-copied to u32 element to make it as LSB format. And then + * converted to big endian as required by mcp-write. + */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + memcpy(&val, &p_mfw_buf[offset], sizeof(u32)); + val = cpu_to_be32(val); + qed_wr(p_hwfn, p_ptt, addr + offset, val); + } + +drv_done: + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp, + ¶m); + + vfree(p_mfw_buf); + + return rc; +} diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 907976fd56f7..8e4fad427ac2 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -182,6 +182,43 @@ enum qed_led_mode { QED_LED_MODE_RESTORE }; +struct qed_mfw_tlv_eth { + u16 lso_maxoff_size; + bool lso_maxoff_size_set; + u16 lso_minseg_size; + bool lso_minseg_size_set; + u8 prom_mode; + bool prom_mode_set; + u16 tx_descr_size; + bool tx_descr_size_set; + u16 rx_descr_size; + bool rx_descr_size_set; + u16 netq_count; + bool netq_count_set; + u32 tcp4_offloads; + bool tcp4_offloads_set; + u32 tcp6_offloads; + bool tcp6_offloads_set; + u16 tx_descr_qdepth; + bool tx_descr_qdepth_set; + u16 rx_descr_qdepth; + bool rx_descr_qdepth_set; + u8 iov_offload; +#define QED_MFW_TLV_IOV_OFFLOAD_NONE (0) +#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1) +#define QED_MFW_TLV_IOV_OFFLOAD_VEB (2) +#define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3) + bool iov_offload_set; + u8 txqs_empty; + bool txqs_empty_set; + u8 rxqs_empty; + bool rxqs_empty_set; + u8 num_txqs_full; + bool num_txqs_full_set; + u8 num_rxqs_full; + bool num_rxqs_full_set; +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) -- cgit v1.2.3 From f240b6882211aae7155a9839dff1426e2853fe30 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:39 -0700 Subject: qed: Add support for processing fcoe tlv request. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. 
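Because each TLV group in qed_mfw_process_tlv_req() is a single bit, the handler can OR the groups named by the request into one mask, clear the bits that do not match the function personality, and then visit the mask one bit at a time with id <<= 1. A hedged userspace sketch of that flow (group values mirror the enum; the personality check is simplified):

#include <stdio.h>

/* Sketch of the group handling: every TLV group is a distinct bit, groups
 * that do not fit the function personality are cleared, and the update
 * loop walks the mask one bit at a time.  Values below mirror the enum;
 * the personality logic is simplified for illustration.
 */
enum demo_tlv_group {
	DEMO_TLV_GENERIC = 0x1,
	DEMO_TLV_ETH     = 0x2,
	DEMO_TLV_FCOE    = 0x4,
	DEMO_TLV_MAX     = 0x8,	/* one past the last valid bit */
};

static const char *group_name(unsigned int id)
{
	switch (id) {
	case DEMO_TLV_GENERIC: return "generic";
	case DEMO_TLV_ETH:     return "eth";
	case DEMO_TLV_FCOE:    return "fcoe";
	default:               return "?";
	}
}

int main(void)
{
	unsigned int tlv_group = DEMO_TLV_GENERIC | DEMO_TLV_ETH | DEMO_TLV_FCOE;
	int is_l2_function = 1;	/* pretend this is an L2 personality */

	/* Sanitize: an L2 function should not answer FCoE TLVs, and vice versa. */
	if (is_l2_function)
		tlv_group &= ~DEMO_TLV_FCOE;
	else
		tlv_group &= ~DEMO_TLV_ETH;

	/* Same shape as the driver loop: id <<= 1 visits each group bit. */
	for (unsigned int id = DEMO_TLV_GENERIC; id < DEMO_TLV_MAX; id <<= 1)
		if (tlv_group & id)
			printf("updating %s TLVs\n", group_name(id));

	return 0;
}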
Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 4 +- drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 828 ++++++++++++++++++++++++++ include/linux/qed/qed_if.h | 193 ++++++ 3 files changed, 1024 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 591877fca67a..b31f5d8bd27c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -216,7 +216,8 @@ enum qed_ov_wol { enum qed_mfw_tlv_type { QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */ QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */ - QED_MFW_TLV_MAX = 0x4, + QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */ + QED_MFW_TLV_MAX = 0x8, }; struct qed_mfw_tlv_generic { @@ -245,6 +246,7 @@ struct qed_mfw_tlv_generic { union qed_mfw_tlv_data { struct qed_mfw_tlv_generic generic; struct qed_mfw_tlv_eth eth; + struct qed_mfw_tlv_fcoe fcoe; }; /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index d58a7146cce5..1873cfc08715 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -64,6 +64,157 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) case DRV_TLV_RX_QUEUES_FULL: *tlv_group |= QED_MFW_TLV_ETH; break; + case DRV_TLV_SCSI_TO: + case DRV_TLV_R_T_TOV: + case DRV_TLV_R_A_TOV: + case DRV_TLV_E_D_TOV: + case DRV_TLV_CR_TOV: + case DRV_TLV_BOOT_TYPE: + case DRV_TLV_NPIV_STATE: + case DRV_TLV_NUM_OF_NPIV_IDS: + case DRV_TLV_SWITCH_NAME: + case DRV_TLV_SWITCH_PORT_NUM: + case DRV_TLV_SWITCH_PORT_ID: + case DRV_TLV_VENDOR_NAME: + case DRV_TLV_SWITCH_MODEL: + case DRV_TLV_SWITCH_FW_VER: + case DRV_TLV_QOS_PRIORITY_PER_802_1P: + case DRV_TLV_PORT_ALIAS: + case DRV_TLV_PORT_STATE: + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_LINK_FAILURE_COUNT: + case DRV_TLV_FCOE_BOOT_PROGRESS: + case DRV_TLV_RX_BROADCAST_PACKETS: + case DRV_TLV_TX_BROADCAST_PACKETS: + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + case DRV_TLV_FCOE_TX_FRAMES_SENT: + case DRV_TLV_FCOE_TX_BYTES_SENT: + case DRV_TLV_CRC_ERROR_COUNT: + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + case DRV_TLV_DISPARITY_ERROR_COUNT: + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + 
case DRV_TLV_LAST_FLOGI_RJT: + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + case DRV_TLV_FDISCS_SENT_COUNT: + case DRV_TLV_FDISC_ACCS_RECEIVED: + case DRV_TLV_FDISC_RJTS_RECEIVED: + case DRV_TLV_PLOGI_SENT_COUNT: + case DRV_TLV_PLOGI_ACCS_RECEIVED: + case DRV_TLV_PLOGI_RJTS_RECEIVED: + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_1_TIMESTAMP: + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_2_TIMESTAMP: + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_3_TIMESTAMP: + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_4_TIMESTAMP: + case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_5_TIMESTAMP: + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + case DRV_TLV_LOGOS_ISSUED: + case DRV_TLV_LOGO_ACCS_RECEIVED: + case DRV_TLV_LOGO_RJTS_RECEIVED: + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_1_TIMESTAMP: + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_2_TIMESTAMP: + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_3_TIMESTAMP: + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_4_TIMESTAMP: + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_5_TIMESTAMP: + case DRV_TLV_LOGOS_RECEIVED: + case DRV_TLV_ACCS_ISSUED: + case DRV_TLV_PRLIS_ISSUED: + case DRV_TLV_ACCS_RECEIVED: + case DRV_TLV_ABTS_SENT_COUNT: + case DRV_TLV_ABTS_ACCS_RECEIVED: + case DRV_TLV_ABTS_RJTS_RECEIVED: + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_1_TIMESTAMP: + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_2_TIMESTAMP: + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_3_TIMESTAMP: + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_4_TIMESTAMP: + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_5_TIMESTAMP: + case DRV_TLV_RSCNS_RECEIVED: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + case DRV_TLV_LUN_RESETS_ISSUED: + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + case DRV_TLV_TPRLOS_SENT: + case DRV_TLV_NOS_SENT_COUNT: + case DRV_TLV_NOS_RECEIVED_COUNT: + case DRV_TLV_OLS_COUNT: + case DRV_TLV_LR_COUNT: + case DRV_TLV_LRR_COUNT: + case DRV_TLV_LIP_SENT_COUNT: + case DRV_TLV_LIP_RECEIVED_COUNT: + case DRV_TLV_EOFA_COUNT: + case DRV_TLV_EOFNI_COUNT: + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + case 
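qed_mfw_get_tlv_time_value() above clamps clearly invalid date components to zero and then renders the timestamp as a concatenated decimal string of at most QED_MFW_TLV_TIME_SIZE (14) bytes. A simplified standalone sketch of that idea (the formatting helper is illustrative, not the driver's exact code):

#include <stdint.h>
#include <stdio.h>

#define DEMO_TLV_TIME_SIZE 14

struct demo_tlv_time {
	uint8_t  month, day, hour, min;
	uint16_t msec, usec;
};

/* Clamp out-of-range components to zero, then render the timestamp as a
 * concatenated decimal string, in the spirit of the TLV code above.
 */
static int format_tlv_time(struct demo_tlv_time *t, char *buf, size_t len)
{
	if (t->month > 12)
		t->month = 0;
	if (t->day > 31)
		t->day = 0;
	if (t->hour > 23)
		t->hour = 0;
	if (t->min > 59)
		t->min = 0;
	if (t->msec > 999)
		t->msec = 0;
	if (t->usec > 999)
		t->usec = 0;

	return snprintf(buf, len, "%d%d%d%d%d%d",
			t->month, t->day, t->hour, t->min, t->msec, t->usec);
}

int main(void)
{
	struct demo_tlv_time t = {
		.month = 5, .day = 22, .hour = 7,
		.min = 61 /* invalid, clamped to 0 */, .msec = 3, .usec = 9,
	};
	char buf[DEMO_TLV_TIME_SIZE];

	format_tlv_time(&t, buf, sizeof(buf));
	printf("timestamp string: %s\n", buf);
	return 0;
}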
DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + *tlv_group = QED_MFW_TLV_FCOE; + break; default: return -EINVAL; } @@ -237,6 +388,672 @@ qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv, return -1; } +static int +qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time, + struct qed_tlv_parsed_buf *p_buf) +{ + if (!p_time->b_set) + return -1; + + /* Validate numbers */ + if (p_time->month > 12) + p_time->month = 0; + if (p_time->day > 31) + p_time->day = 0; + if (p_time->hour > 23) + p_time->hour = 0; + if (p_time->min > 59) + p_time->hour = 0; + if (p_time->msec > 999) + p_time->msec = 0; + if (p_time->usec > 999) + p_time->usec = 0; + + memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); + snprintf(p_buf->data, 14, "%d%d%d%d%d%d", + p_time->month, p_time->day, + p_time->hour, p_time->min, p_time->msec, p_time->usec); + + p_buf->p_val = p_buf->data; + + return QED_MFW_TLV_TIME_SIZE; +} + +static int +qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_fcoe *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + struct qed_mfw_tlv_time *p_time; + u8 idx; + + switch (p_tlv->tlv_type) { + case DRV_TLV_SCSI_TO: + if (p_drv_buf->scsi_timeout_set) { + p_buf->p_val = &p_drv_buf->scsi_timeout; + return sizeof(p_drv_buf->scsi_timeout); + } + break; + case DRV_TLV_R_T_TOV: + if (p_drv_buf->rt_tov_set) { + p_buf->p_val = &p_drv_buf->rt_tov; + return sizeof(p_drv_buf->rt_tov); + } + break; + case DRV_TLV_R_A_TOV: + if (p_drv_buf->ra_tov_set) { + p_buf->p_val = &p_drv_buf->ra_tov; + return sizeof(p_drv_buf->ra_tov); + } + break; + case DRV_TLV_E_D_TOV: + if (p_drv_buf->ed_tov_set) { + p_buf->p_val = &p_drv_buf->ed_tov; + return sizeof(p_drv_buf->ed_tov); + } + break; + case DRV_TLV_CR_TOV: + if (p_drv_buf->cr_tov_set) { + p_buf->p_val = &p_drv_buf->cr_tov; + return sizeof(p_drv_buf->cr_tov); + } + break; + case DRV_TLV_BOOT_TYPE: + if (p_drv_buf->boot_type_set) { + p_buf->p_val = &p_drv_buf->boot_type; + return sizeof(p_drv_buf->boot_type); + } + break; + case DRV_TLV_NPIV_STATE: + if (p_drv_buf->npiv_state_set) { + p_buf->p_val = &p_drv_buf->npiv_state; + return sizeof(p_drv_buf->npiv_state); + } + break; + case DRV_TLV_NUM_OF_NPIV_IDS: + if (p_drv_buf->num_npiv_ids_set) { + p_buf->p_val = &p_drv_buf->num_npiv_ids; + return sizeof(p_drv_buf->num_npiv_ids); + } + break; + case DRV_TLV_SWITCH_NAME: + if (p_drv_buf->switch_name_set) { + p_buf->p_val = &p_drv_buf->switch_name; + return sizeof(p_drv_buf->switch_name); + } + break; + case DRV_TLV_SWITCH_PORT_NUM: + if (p_drv_buf->switch_portnum_set) { + p_buf->p_val = &p_drv_buf->switch_portnum; + return sizeof(p_drv_buf->switch_portnum); + } + break; + case DRV_TLV_SWITCH_PORT_ID: + if (p_drv_buf->switch_portid_set) { + p_buf->p_val = &p_drv_buf->switch_portid; + return sizeof(p_drv_buf->switch_portid); + } + break; + case DRV_TLV_VENDOR_NAME: + if (p_drv_buf->vendor_name_set) { + p_buf->p_val = &p_drv_buf->vendor_name; + return sizeof(p_drv_buf->vendor_name); + } + break; + case DRV_TLV_SWITCH_MODEL: + if (p_drv_buf->switch_model_set) { + p_buf->p_val = &p_drv_buf->switch_model; + return sizeof(p_drv_buf->switch_model); + } + break; + case DRV_TLV_SWITCH_FW_VER: + if (p_drv_buf->switch_fw_version_set) { + p_buf->p_val = &p_drv_buf->switch_fw_version; + return sizeof(p_drv_buf->switch_fw_version); + } + break; + case DRV_TLV_QOS_PRIORITY_PER_802_1P: + if 
(p_drv_buf->qos_pri_set) { + p_buf->p_val = &p_drv_buf->qos_pri; + return sizeof(p_drv_buf->qos_pri); + } + break; + case DRV_TLV_PORT_ALIAS: + if (p_drv_buf->port_alias_set) { + p_buf->p_val = &p_drv_buf->port_alias; + return sizeof(p_drv_buf->port_alias); + } + break; + case DRV_TLV_PORT_STATE: + if (p_drv_buf->port_state_set) { + p_buf->p_val = &p_drv_buf->port_state; + return sizeof(p_drv_buf->port_state); + } + break; + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_tx_descr_size_set) { + p_buf->p_val = &p_drv_buf->fip_tx_descr_size; + return sizeof(p_drv_buf->fip_tx_descr_size); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_rx_descr_size_set) { + p_buf->p_val = &p_drv_buf->fip_rx_descr_size; + return sizeof(p_drv_buf->fip_rx_descr_size); + } + break; + case DRV_TLV_LINK_FAILURE_COUNT: + if (p_drv_buf->link_failures_set) { + p_buf->p_val = &p_drv_buf->link_failures; + return sizeof(p_drv_buf->link_failures); + } + break; + case DRV_TLV_FCOE_BOOT_PROGRESS: + if (p_drv_buf->fcoe_boot_progress_set) { + p_buf->p_val = &p_drv_buf->fcoe_boot_progress; + return sizeof(p_drv_buf->fcoe_boot_progress); + } + break; + case DRV_TLV_RX_BROADCAST_PACKETS: + if (p_drv_buf->rx_bcast_set) { + p_buf->p_val = &p_drv_buf->rx_bcast; + return sizeof(p_drv_buf->rx_bcast); + } + break; + case DRV_TLV_TX_BROADCAST_PACKETS: + if (p_drv_buf->tx_bcast_set) { + p_buf->p_val = &p_drv_buf->tx_bcast; + return sizeof(p_drv_buf->tx_bcast); + } + break; + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_txq_depth_set) { + p_buf->p_val = &p_drv_buf->fcoe_txq_depth; + return sizeof(p_drv_buf->fcoe_txq_depth); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_rxq_depth_set) { + p_buf->p_val = &p_drv_buf->fcoe_rxq_depth; + return sizeof(p_drv_buf->fcoe_rxq_depth); + } + break; + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + if (p_drv_buf->fcoe_rx_frames_set) { + p_buf->p_val = &p_drv_buf->fcoe_rx_frames; + return sizeof(p_drv_buf->fcoe_rx_frames); + } + break; + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + if (p_drv_buf->fcoe_rx_bytes_set) { + p_buf->p_val = &p_drv_buf->fcoe_rx_bytes; + return sizeof(p_drv_buf->fcoe_rx_bytes); + } + break; + case DRV_TLV_FCOE_TX_FRAMES_SENT: + if (p_drv_buf->fcoe_tx_frames_set) { + p_buf->p_val = &p_drv_buf->fcoe_tx_frames; + return sizeof(p_drv_buf->fcoe_tx_frames); + } + break; + case DRV_TLV_FCOE_TX_BYTES_SENT: + if (p_drv_buf->fcoe_tx_bytes_set) { + p_buf->p_val = &p_drv_buf->fcoe_tx_bytes; + return sizeof(p_drv_buf->fcoe_tx_bytes); + } + break; + case DRV_TLV_CRC_ERROR_COUNT: + if (p_drv_buf->crc_count_set) { + p_buf->p_val = &p_drv_buf->crc_count; + return sizeof(p_drv_buf->crc_count); + } + break; + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2; + + if (p_drv_buf->crc_err_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx]; + return sizeof(p_drv_buf->crc_err_src_fcid[idx]); + } + break; + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2; + + return 
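The per-instance FC_ID and timestamp TLVs above are enumerated in interleaved pairs (instance 1 value, instance 1 timestamp, instance 2 value, and so on), which is why the handlers divide the distance from the first enumerator by two to recover the array slot. A small worked sketch of that mapping, using made-up enumerators:

#include <stdio.h>

/* Illustrative enumerators laid out the way the CRC-error and PLOGI
 * DRV_TLV_* entries are: the value TLV and timestamp TLV of instance N
 * sit next to each other, so (type - first_value_type) / 2 yields the
 * array index used for both.
 */
enum demo_tlv {
	DEMO_CRC_1_FC_ID, DEMO_CRC_1_TSTAMP,
	DEMO_CRC_2_FC_ID, DEMO_CRC_2_TSTAMP,
	DEMO_CRC_3_FC_ID, DEMO_CRC_3_TSTAMP,
};

int main(void)
{
	for (int type = DEMO_CRC_1_FC_ID; type <= DEMO_CRC_3_TSTAMP; type++) {
		int idx = (type - DEMO_CRC_1_FC_ID) / 2;

		printf("tlv type %d -> instance index %d (%s)\n",
		       type, idx,
		       ((type - DEMO_CRC_1_FC_ID) % 2) ? "timestamp" : "fc_id");
	}
	return 0;
}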
qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx], + p_buf); + case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + if (p_drv_buf->losync_err_set) { + p_buf->p_val = &p_drv_buf->losync_err; + return sizeof(p_drv_buf->losync_err); + } + break; + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + if (p_drv_buf->losig_err_set) { + p_buf->p_val = &p_drv_buf->losig_err; + return sizeof(p_drv_buf->losig_err); + } + break; + case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + if (p_drv_buf->primtive_err_set) { + p_buf->p_val = &p_drv_buf->primtive_err; + return sizeof(p_drv_buf->primtive_err); + } + break; + case DRV_TLV_DISPARITY_ERROR_COUNT: + if (p_drv_buf->disparity_err_set) { + p_buf->p_val = &p_drv_buf->disparity_err; + return sizeof(p_drv_buf->disparity_err); + } + break; + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + if (p_drv_buf->code_violation_err_set) { + p_buf->p_val = &p_drv_buf->code_violation_err; + return sizeof(p_drv_buf->code_violation_err); + } + break; + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + idx = p_tlv->tlv_type - + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1; + if (p_drv_buf->flogi_param_set[idx]) { + p_buf->p_val = &p_drv_buf->flogi_param[idx]; + return sizeof(p_drv_buf->flogi_param[idx]); + } + break; + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp, + p_buf); + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + idx = p_tlv->tlv_type - + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1; + + if (p_drv_buf->flogi_acc_param_set[idx]) { + p_buf->p_val = &p_drv_buf->flogi_acc_param[idx]; + return sizeof(p_drv_buf->flogi_acc_param[idx]); + } + break; + case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp, + p_buf); + case DRV_TLV_LAST_FLOGI_RJT: + if (p_drv_buf->flogi_rjt_set) { + p_buf->p_val = &p_drv_buf->flogi_rjt; + return sizeof(p_drv_buf->flogi_rjt); + } + break; + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp, + p_buf); + case DRV_TLV_FDISCS_SENT_COUNT: + if (p_drv_buf->fdiscs_set) { + p_buf->p_val = &p_drv_buf->fdiscs; + return sizeof(p_drv_buf->fdiscs); + } + break; + case DRV_TLV_FDISC_ACCS_RECEIVED: + if (p_drv_buf->fdisc_acc_set) { + p_buf->p_val = &p_drv_buf->fdisc_acc; + return sizeof(p_drv_buf->fdisc_acc); + } + break; + case DRV_TLV_FDISC_RJTS_RECEIVED: + if (p_drv_buf->fdisc_rjt_set) { + p_buf->p_val = &p_drv_buf->fdisc_rjt; + return sizeof(p_drv_buf->fdisc_rjt); + } + break; + case DRV_TLV_PLOGI_SENT_COUNT: + if (p_drv_buf->plogi_set) { + p_buf->p_val = &p_drv_buf->plogi; + return sizeof(p_drv_buf->plogi); + } + break; + case DRV_TLV_PLOGI_ACCS_RECEIVED: + if (p_drv_buf->plogi_acc_set) { + p_buf->p_val = &p_drv_buf->plogi_acc; + return sizeof(p_drv_buf->plogi_acc); + } + break; + case DRV_TLV_PLOGI_RJTS_RECEIVED: + if (p_drv_buf->plogi_rjt_set) { + p_buf->p_val = &p_drv_buf->plogi_rjt; + return sizeof(p_drv_buf->plogi_rjt); + } + break; + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + case 
DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2; + + if (p_drv_buf->plogi_dst_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx]; + return sizeof(p_drv_buf->plogi_dst_fcid[idx]); + } + break; + case DRV_TLV_PLOGI_1_TIMESTAMP: + case DRV_TLV_PLOGI_2_TIMESTAMP: + case DRV_TLV_PLOGI_3_TIMESTAMP: + case DRV_TLV_PLOGI_4_TIMESTAMP: + case DRV_TLV_PLOGI_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx], + p_buf); + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2; + + if (p_drv_buf->plogi_acc_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx]; + return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]); + } + break; + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2; + p_time = &p_drv_buf->plogi_acc_tstamp[idx]; + + return qed_mfw_get_tlv_time_value(p_time, p_buf); + case DRV_TLV_LOGOS_ISSUED: + if (p_drv_buf->tx_plogos_set) { + p_buf->p_val = &p_drv_buf->tx_plogos; + return sizeof(p_drv_buf->tx_plogos); + } + break; + case DRV_TLV_LOGO_ACCS_RECEIVED: + if (p_drv_buf->plogo_acc_set) { + p_buf->p_val = &p_drv_buf->plogo_acc; + return sizeof(p_drv_buf->plogo_acc); + } + break; + case DRV_TLV_LOGO_RJTS_RECEIVED: + if (p_drv_buf->plogo_rjt_set) { + p_buf->p_val = &p_drv_buf->plogo_rjt; + return sizeof(p_drv_buf->plogo_rjt); + } + break; + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) / + 2; + + if (p_drv_buf->plogo_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx]; + return sizeof(p_drv_buf->plogo_src_fcid[idx]); + } + break; + case DRV_TLV_LOGO_1_TIMESTAMP: + case DRV_TLV_LOGO_2_TIMESTAMP: + case DRV_TLV_LOGO_3_TIMESTAMP: + case DRV_TLV_LOGO_4_TIMESTAMP: + case DRV_TLV_LOGO_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx], + p_buf); + case DRV_TLV_LOGOS_RECEIVED: + if (p_drv_buf->rx_logos_set) { + p_buf->p_val = &p_drv_buf->rx_logos; + return sizeof(p_drv_buf->rx_logos); + } + break; + case DRV_TLV_ACCS_ISSUED: + if (p_drv_buf->tx_accs_set) { + p_buf->p_val = &p_drv_buf->tx_accs; + return sizeof(p_drv_buf->tx_accs); + } + break; + case DRV_TLV_PRLIS_ISSUED: + if (p_drv_buf->tx_prlis_set) { + p_buf->p_val = &p_drv_buf->tx_prlis; + return sizeof(p_drv_buf->tx_prlis); + } + break; + case DRV_TLV_ACCS_RECEIVED: + if (p_drv_buf->rx_accs_set) { + p_buf->p_val = &p_drv_buf->rx_accs; + return sizeof(p_drv_buf->rx_accs); + } + break; + case DRV_TLV_ABTS_SENT_COUNT: + if (p_drv_buf->tx_abts_set) { + p_buf->p_val = &p_drv_buf->tx_abts; + return sizeof(p_drv_buf->tx_abts); + } + break; + case DRV_TLV_ABTS_ACCS_RECEIVED: + if (p_drv_buf->rx_abts_acc_set) { + p_buf->p_val = &p_drv_buf->rx_abts_acc; + return 
sizeof(p_drv_buf->rx_abts_acc); + } + break; + case DRV_TLV_ABTS_RJTS_RECEIVED: + if (p_drv_buf->rx_abts_rjt_set) { + p_buf->p_val = &p_drv_buf->rx_abts_rjt; + return sizeof(p_drv_buf->rx_abts_rjt); + } + break; + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2; + + if (p_drv_buf->abts_dst_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx]; + return sizeof(p_drv_buf->abts_dst_fcid[idx]); + } + break; + case DRV_TLV_ABTS_1_TIMESTAMP: + case DRV_TLV_ABTS_2_TIMESTAMP: + case DRV_TLV_ABTS_3_TIMESTAMP: + case DRV_TLV_ABTS_4_TIMESTAMP: + case DRV_TLV_ABTS_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx], + p_buf); + case DRV_TLV_RSCNS_RECEIVED: + if (p_drv_buf->rx_rscn_set) { + p_buf->p_val = &p_drv_buf->rx_rscn; + return sizeof(p_drv_buf->rx_rscn); + } + break; + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1; + + if (p_drv_buf->rx_rscn_nport_set[idx]) { + p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx]; + return sizeof(p_drv_buf->rx_rscn_nport[idx]); + } + break; + case DRV_TLV_LUN_RESETS_ISSUED: + if (p_drv_buf->tx_lun_rst_set) { + p_buf->p_val = &p_drv_buf->tx_lun_rst; + return sizeof(p_drv_buf->tx_lun_rst); + } + break; + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + if (p_drv_buf->abort_task_sets_set) { + p_buf->p_val = &p_drv_buf->abort_task_sets; + return sizeof(p_drv_buf->abort_task_sets); + } + break; + case DRV_TLV_TPRLOS_SENT: + if (p_drv_buf->tx_tprlos_set) { + p_buf->p_val = &p_drv_buf->tx_tprlos; + return sizeof(p_drv_buf->tx_tprlos); + } + break; + case DRV_TLV_NOS_SENT_COUNT: + if (p_drv_buf->tx_nos_set) { + p_buf->p_val = &p_drv_buf->tx_nos; + return sizeof(p_drv_buf->tx_nos); + } + break; + case DRV_TLV_NOS_RECEIVED_COUNT: + if (p_drv_buf->rx_nos_set) { + p_buf->p_val = &p_drv_buf->rx_nos; + return sizeof(p_drv_buf->rx_nos); + } + break; + case DRV_TLV_OLS_COUNT: + if (p_drv_buf->ols_set) { + p_buf->p_val = &p_drv_buf->ols; + return sizeof(p_drv_buf->ols); + } + break; + case DRV_TLV_LR_COUNT: + if (p_drv_buf->lr_set) { + p_buf->p_val = &p_drv_buf->lr; + return sizeof(p_drv_buf->lr); + } + break; + case DRV_TLV_LRR_COUNT: + if (p_drv_buf->lrr_set) { + p_buf->p_val = &p_drv_buf->lrr; + return sizeof(p_drv_buf->lrr); + } + break; + case DRV_TLV_LIP_SENT_COUNT: + if (p_drv_buf->tx_lip_set) { + p_buf->p_val = &p_drv_buf->tx_lip; + return sizeof(p_drv_buf->tx_lip); + } + break; + case DRV_TLV_LIP_RECEIVED_COUNT: + if (p_drv_buf->rx_lip_set) { + p_buf->p_val = &p_drv_buf->rx_lip; + return sizeof(p_drv_buf->rx_lip); + } + break; + case DRV_TLV_EOFA_COUNT: + if (p_drv_buf->eofa_set) { + p_buf->p_val = &p_drv_buf->eofa; + return sizeof(p_drv_buf->eofa); + } + break; + case DRV_TLV_EOFNI_COUNT: + if (p_drv_buf->eofni_set) { + p_buf->p_val = &p_drv_buf->eofni; + return sizeof(p_drv_buf->eofni); + } + break; + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + if (p_drv_buf->scsi_chks_set) { + p_buf->p_val = &p_drv_buf->scsi_chks; + return sizeof(p_drv_buf->scsi_chks); + } + break; + case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_cond_met_set) 
{ + p_buf->p_val = &p_drv_buf->scsi_cond_met; + return sizeof(p_drv_buf->scsi_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + if (p_drv_buf->scsi_busy_set) { + p_buf->p_val = &p_drv_buf->scsi_busy; + return sizeof(p_drv_buf->scsi_busy); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + if (p_drv_buf->scsi_inter_set) { + p_buf->p_val = &p_drv_buf->scsi_inter; + return sizeof(p_drv_buf->scsi_inter); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_inter_cond_met_set) { + p_buf->p_val = &p_drv_buf->scsi_inter_cond_met; + return sizeof(p_drv_buf->scsi_inter_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + if (p_drv_buf->scsi_rsv_conflicts_set) { + p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts; + return sizeof(p_drv_buf->scsi_rsv_conflicts); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + if (p_drv_buf->scsi_tsk_full_set) { + p_buf->p_val = &p_drv_buf->scsi_tsk_full; + return sizeof(p_drv_buf->scsi_tsk_full); + } + break; + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + if (p_drv_buf->scsi_aca_active_set) { + p_buf->p_val = &p_drv_buf->scsi_aca_active; + return sizeof(p_drv_buf->scsi_aca_active); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + if (p_drv_buf->scsi_tsk_abort_set) { + p_buf->p_val = &p_drv_buf->scsi_tsk_abort; + return sizeof(p_drv_buf->scsi_tsk_abort); + } + break; + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + idx = (p_tlv->tlv_type - + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2; + + if (p_drv_buf->scsi_rx_chk_set[idx]) { + p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx]; + return sizeof(p_drv_buf->scsi_rx_chk[idx]); + } + break; + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2; + p_time = &p_drv_buf->scsi_chk_tstamp[idx]; + + return qed_mfw_get_tlv_time_value(p_time, p_buf); + default: + break; + } + + return -1; +} + static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, u8 tlv_group, u8 *p_mfw_buf, u32 size) { @@ -276,6 +1093,10 @@ static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, len = qed_mfw_get_eth_tlv_value(&tlv, &p_tlv_data->eth, &buffer); + else if (tlv_group == QED_MFW_TLV_FCOE) + len = qed_mfw_get_fcoe_tlv_value(&tlv, + &p_tlv_data->fcoe, + &buffer); if (len > 0) { WARN(len > 4 * tlv.tlv_length, @@ -351,6 +1172,13 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) tlv_group &= ~QED_MFW_TLV_ETH; } + if ((tlv_group & QED_MFW_TLV_FCOE) && + p_hwfn->hw_info.personality != QED_PCI_FCOE) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping FCoE TLVs for non-FCoE function\n"); + tlv_group &= ~QED_MFW_TLV_FCOE; + } + /* Update the TLV values in the local buffer */ for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) { if (tlv_group & id) diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8e4fad427ac2..74c2b9ad8afa 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -219,6 +219,199 @@ struct qed_mfw_tlv_eth { bool num_rxqs_full_set; }; +#define QED_MFW_TLV_TIME_SIZE 14 +struct qed_mfw_tlv_time 
{ + bool b_set; + u8 month; + u8 day; + u8 hour; + u8 min; + u16 msec; + u16 usec; +}; + +struct qed_mfw_tlv_fcoe { + u8 scsi_timeout; + bool scsi_timeout_set; + u32 rt_tov; + bool rt_tov_set; + u32 ra_tov; + bool ra_tov_set; + u32 ed_tov; + bool ed_tov_set; + u32 cr_tov; + bool cr_tov_set; + u8 boot_type; + bool boot_type_set; + u8 npiv_state; + bool npiv_state_set; + u32 num_npiv_ids; + bool num_npiv_ids_set; + u8 switch_name[8]; + bool switch_name_set; + u16 switch_portnum; + bool switch_portnum_set; + u8 switch_portid[3]; + bool switch_portid_set; + u8 vendor_name[8]; + bool vendor_name_set; + u8 switch_model[8]; + bool switch_model_set; + u8 switch_fw_version[8]; + bool switch_fw_version_set; + u8 qos_pri; + bool qos_pri_set; + u8 port_alias[3]; + bool port_alias_set; + u8 port_state; +#define QED_MFW_TLV_PORT_STATE_OFFLINE (0) +#define QED_MFW_TLV_PORT_STATE_LOOP (1) +#define QED_MFW_TLV_PORT_STATE_P2P (2) +#define QED_MFW_TLV_PORT_STATE_FABRIC (3) + bool port_state_set; + u16 fip_tx_descr_size; + bool fip_tx_descr_size_set; + u16 fip_rx_descr_size; + bool fip_rx_descr_size_set; + u16 link_failures; + bool link_failures_set; + u8 fcoe_boot_progress; + bool fcoe_boot_progress_set; + u64 rx_bcast; + bool rx_bcast_set; + u64 tx_bcast; + bool tx_bcast_set; + u16 fcoe_txq_depth; + bool fcoe_txq_depth_set; + u16 fcoe_rxq_depth; + bool fcoe_rxq_depth_set; + u64 fcoe_rx_frames; + bool fcoe_rx_frames_set; + u64 fcoe_rx_bytes; + bool fcoe_rx_bytes_set; + u64 fcoe_tx_frames; + bool fcoe_tx_frames_set; + u64 fcoe_tx_bytes; + bool fcoe_tx_bytes_set; + u16 crc_count; + bool crc_count_set; + u32 crc_err_src_fcid[5]; + bool crc_err_src_fcid_set[5]; + struct qed_mfw_tlv_time crc_err[5]; + u16 losync_err; + bool losync_err_set; + u16 losig_err; + bool losig_err_set; + u16 primtive_err; + bool primtive_err_set; + u16 disparity_err; + bool disparity_err_set; + u16 code_violation_err; + bool code_violation_err_set; + u32 flogi_param[4]; + bool flogi_param_set[4]; + struct qed_mfw_tlv_time flogi_tstamp; + u32 flogi_acc_param[4]; + bool flogi_acc_param_set[4]; + struct qed_mfw_tlv_time flogi_acc_tstamp; + u32 flogi_rjt; + bool flogi_rjt_set; + struct qed_mfw_tlv_time flogi_rjt_tstamp; + u32 fdiscs; + bool fdiscs_set; + u8 fdisc_acc; + bool fdisc_acc_set; + u8 fdisc_rjt; + bool fdisc_rjt_set; + u8 plogi; + bool plogi_set; + u8 plogi_acc; + bool plogi_acc_set; + u8 plogi_rjt; + bool plogi_rjt_set; + u32 plogi_dst_fcid[5]; + bool plogi_dst_fcid_set[5]; + struct qed_mfw_tlv_time plogi_tstamp[5]; + u32 plogi_acc_src_fcid[5]; + bool plogi_acc_src_fcid_set[5]; + struct qed_mfw_tlv_time plogi_acc_tstamp[5]; + u8 tx_plogos; + bool tx_plogos_set; + u8 plogo_acc; + bool plogo_acc_set; + u8 plogo_rjt; + bool plogo_rjt_set; + u32 plogo_src_fcid[5]; + bool plogo_src_fcid_set[5]; + struct qed_mfw_tlv_time plogo_tstamp[5]; + u8 rx_logos; + bool rx_logos_set; + u8 tx_accs; + bool tx_accs_set; + u8 tx_prlis; + bool tx_prlis_set; + u8 rx_accs; + bool rx_accs_set; + u8 tx_abts; + bool tx_abts_set; + u8 rx_abts_acc; + bool rx_abts_acc_set; + u8 rx_abts_rjt; + bool rx_abts_rjt_set; + u32 abts_dst_fcid[5]; + bool abts_dst_fcid_set[5]; + struct qed_mfw_tlv_time abts_tstamp[5]; + u8 rx_rscn; + bool rx_rscn_set; + u32 rx_rscn_nport[4]; + bool rx_rscn_nport_set[4]; + u8 tx_lun_rst; + bool tx_lun_rst_set; + u8 abort_task_sets; + bool abort_task_sets_set; + u8 tx_tprlos; + bool tx_tprlos_set; + u8 tx_nos; + bool tx_nos_set; + u8 rx_nos; + bool rx_nos_set; + u8 ols; + bool ols_set; + u8 lr; + bool lr_set; + u8 lrr; + bool lrr_set; + u8 
tx_lip; + bool tx_lip_set; + u8 rx_lip; + bool rx_lip_set; + u8 eofa; + bool eofa_set; + u8 eofni; + bool eofni_set; + u8 scsi_chks; + bool scsi_chks_set; + u8 scsi_cond_met; + bool scsi_cond_met_set; + u8 scsi_busy; + bool scsi_busy_set; + u8 scsi_inter; + bool scsi_inter_set; + u8 scsi_inter_cond_met; + bool scsi_inter_cond_met_set; + u8 scsi_rsv_conflicts; + bool scsi_rsv_conflicts_set; + u8 scsi_tsk_full; + bool scsi_tsk_full_set; + u8 scsi_aca_active; + bool scsi_aca_active_set; + u8 scsi_tsk_abort; + bool scsi_tsk_abort_set; + u32 scsi_rx_chk[5]; + bool scsi_rx_chk_set[5]; + struct qed_mfw_tlv_time scsi_chk_tstamp[5]; +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) -- cgit v1.2.3 From 77a509e4f6d14c2e09acbab5a89a769740bda62c Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:40 -0700 Subject: qed: Add support for processing iscsi tlv request. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 4 +- drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 131 ++++++++++++++++++++++++++ include/linux/qed/qed_if.h | 36 +++++++ 3 files changed, 170 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index b31f5d8bd27c..632a838f1fe3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -217,7 +217,8 @@ enum qed_mfw_tlv_type { QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */ QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */ QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */ - QED_MFW_TLV_MAX = 0x8, + QED_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */ + QED_MFW_TLV_MAX = 0x16, }; struct qed_mfw_tlv_generic { @@ -247,6 +248,7 @@ union qed_mfw_tlv_data { struct qed_mfw_tlv_generic generic; struct qed_mfw_tlv_eth eth; struct qed_mfw_tlv_fcoe fcoe; + struct qed_mfw_tlv_iscsi iscsi; }; /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 1873cfc08715..6c16158d8090 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -215,6 +215,23 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: *tlv_group = QED_MFW_TLV_FCOE; break; + case DRV_TLV_TARGET_LLMNR_ENABLED: + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + case DRV_TLV_AUTHENTICATION_METHOD: + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + case DRV_TLV_MAX_FRAME_SIZE: + case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_ISCSI_BOOT_PROGRESS: + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + *tlv_group |= QED_MFW_TLV_ISCSI; + break; default: return -EINVAL; } @@ -1054,6 +1071,109 @@ qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv, return -1; } +static int +qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_iscsi *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_TARGET_LLMNR_ENABLED: + if (p_drv_buf->target_llmnr_set) { + p_buf->p_val = &p_drv_buf->target_llmnr; + return 
sizeof(p_drv_buf->target_llmnr); + } + break; + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + if (p_drv_buf->header_digest_set) { + p_buf->p_val = &p_drv_buf->header_digest; + return sizeof(p_drv_buf->header_digest); + } + break; + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + if (p_drv_buf->data_digest_set) { + p_buf->p_val = &p_drv_buf->data_digest; + return sizeof(p_drv_buf->data_digest); + } + break; + case DRV_TLV_AUTHENTICATION_METHOD: + if (p_drv_buf->auth_method_set) { + p_buf->p_val = &p_drv_buf->auth_method; + return sizeof(p_drv_buf->auth_method); + } + break; + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + if (p_drv_buf->boot_taget_portal_set) { + p_buf->p_val = &p_drv_buf->boot_taget_portal; + return sizeof(p_drv_buf->boot_taget_portal); + } + break; + case DRV_TLV_MAX_FRAME_SIZE: + if (p_drv_buf->frame_size_set) { + p_buf->p_val = &p_drv_buf->frame_size; + return sizeof(p_drv_buf->frame_size); + } + break; + case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_desc_size_set) { + p_buf->p_val = &p_drv_buf->tx_desc_size; + return sizeof(p_drv_buf->tx_desc_size); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_desc_size_set) { + p_buf->p_val = &p_drv_buf->rx_desc_size; + return sizeof(p_drv_buf->rx_desc_size); + } + break; + case DRV_TLV_ISCSI_BOOT_PROGRESS: + if (p_drv_buf->boot_progress_set) { + p_buf->p_val = &p_drv_buf->boot_progress; + return sizeof(p_drv_buf->boot_progress); + } + break; + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_desc_qdepth_set) { + p_buf->p_val = &p_drv_buf->tx_desc_qdepth; + return sizeof(p_drv_buf->tx_desc_qdepth); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_desc_qdepth_set) { + p_buf->p_val = &p_drv_buf->rx_desc_qdepth; + return sizeof(p_drv_buf->rx_desc_qdepth); + } + break; + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + p_buf->p_val = &p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + p_buf->p_val = &p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + p_buf->p_val = &p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + p_buf->p_val = &p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, u8 tlv_group, u8 *p_mfw_buf, u32 size) { @@ -1097,6 +1217,10 @@ static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, len = qed_mfw_get_fcoe_tlv_value(&tlv, &p_tlv_data->fcoe, &buffer); + else + len = qed_mfw_get_iscsi_tlv_value(&tlv, + &p_tlv_data->iscsi, + &buffer); if (len > 0) { WARN(len > 4 * tlv.tlv_length, @@ -1179,6 +1303,13 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) tlv_group &= ~QED_MFW_TLV_FCOE; } + if ((tlv_group & QED_MFW_TLV_ISCSI) && + p_hwfn->hw_info.personality != QED_PCI_ISCSI) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping iSCSI TLVs for non-iSCSI function\n"); + tlv_group &= ~QED_MFW_TLV_ISCSI; + } + /* Update the TLV values in the local buffer */ for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) { if (tlv_group & id) diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 74c2b9ad8afa..92b5352287df 100644 --- a/include/linux/qed/qed_if.h +++ 
b/include/linux/qed/qed_if.h @@ -412,6 +412,42 @@ struct qed_mfw_tlv_fcoe { struct qed_mfw_tlv_time scsi_chk_tstamp[5]; }; +struct qed_mfw_tlv_iscsi { + u8 target_llmnr; + bool target_llmnr_set; + u8 header_digest; + bool header_digest_set; + u8 data_digest; + bool data_digest_set; + u8 auth_method; +#define QED_MFW_TLV_AUTH_METHOD_NONE (1) +#define QED_MFW_TLV_AUTH_METHOD_CHAP (2) +#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3) + bool auth_method_set; + u16 boot_taget_portal; + bool boot_taget_portal_set; + u16 frame_size; + bool frame_size_set; + u16 tx_desc_size; + bool tx_desc_size_set; + u16 rx_desc_size; + bool rx_desc_size_set; + u8 boot_progress; + bool boot_progress_set; + u16 tx_desc_qdepth; + bool tx_desc_qdepth_set; + u16 rx_desc_qdepth; + bool rx_desc_qdepth_set; + u64 rx_frames; + bool rx_frames_set; + u64 rx_bytes; + bool rx_bytes_set; + u64 tx_frames; + bool tx_frames_set; + u64 tx_bytes; + bool tx_bytes_set; +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) -- cgit v1.2.3 From 59ccf86fe69a6a77afebe706913d6b551d84d5bc Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:41 -0700 Subject: qed: Add driver infrastucture for handling mfw requests. MFW requests the TLVs in interrupt context. Extracting of the required data from upper layers and populating of the TLVs require process context. The patch adds work-queues for processing the tlv requests. It also adds the implementation for requesting the tlv values from appropriate protocol driver. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 8 ++ drivers/net/ethernet/qlogic/qed/qed_main.c | 151 ++++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 2 + include/linux/qed/qed_if.h | 10 ++ 4 files changed, 170 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index dfdbe529e2bf..00db3401b898 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -515,6 +515,10 @@ struct qed_simd_fp_handler { void (*func)(void *); }; +enum qed_slowpath_wq_flag { + QED_SLOWPATH_MFW_TLV_REQ, +}; + struct qed_hwfn { struct qed_dev *cdev; u8 my_id; /* ID inside the PF */ @@ -644,6 +648,9 @@ struct qed_hwfn { #endif struct z_stream_s *stream; + struct workqueue_struct *slowpath_wq; + struct delayed_work slowpath_task; + unsigned long slowpath_task_flags; }; struct pci_params { @@ -908,6 +915,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev, union qed_mcp_protocol_stats *stats); int qed_slowpath_irq_req(struct qed_hwfn *hwfn); void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn); +int qed_mfw_tlv_req(struct qed_hwfn *hwfn); int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index cbf0ea97fc8e..68c4399ffd50 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -946,6 +946,68 @@ static void qed_update_pf_params(struct qed_dev *cdev, } } +static void qed_slowpath_wq_stop(struct qed_dev *cdev) +{ + int i; + + if (IS_VF(cdev)) + return; + + for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].slowpath_wq) + continue; + + flush_workqueue(cdev->hwfns[i].slowpath_wq); + destroy_workqueue(cdev->hwfns[i].slowpath_wq); + } +} + +static void qed_slowpath_task(struct 
work_struct *work) +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + slowpath_task.work); + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + + if (!ptt) { + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + return; + } + + if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, + &hwfn->slowpath_task_flags)) + qed_mfw_process_tlv_req(hwfn, ptt); + + qed_ptt_release(hwfn, ptt); +} + +static int qed_slowpath_wq_start(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn; + char name[NAME_SIZE]; + int i; + + if (IS_VF(cdev)) + return 0; + + for_each_hwfn(cdev, i) { + hwfn = &cdev->hwfns[i]; + + snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", + cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); + + hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); + if (!hwfn->slowpath_wq) { + DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); + } + + return 0; +} + static int qed_slowpath_start(struct qed_dev *cdev, struct qed_slowpath_params *params) { @@ -961,6 +1023,9 @@ static int qed_slowpath_start(struct qed_dev *cdev, if (qed_iov_wq_start(cdev)) goto err; + if (qed_slowpath_wq_start(cdev)) + goto err; + if (IS_PF(cdev)) { rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, &cdev->pdev->dev); @@ -1095,6 +1160,8 @@ err: qed_iov_wq_stop(cdev, false); + qed_slowpath_wq_stop(cdev); + return rc; } @@ -1103,6 +1170,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) if (!cdev) return -ENODEV; + qed_slowpath_wq_stop(cdev); + qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { @@ -2089,8 +2158,88 @@ void qed_get_protocol_stats(struct qed_dev *cdev, } } +int qed_mfw_tlv_req(struct qed_hwfn *hwfn) +{ + DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, + "Scheduling slowpath task [Flag: %d]\n", + QED_SLOWPATH_MFW_TLV_REQ); + smp_mb__before_atomic(); + set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); + smp_mb__after_atomic(); + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + + return 0; +} + +static void +qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) +{ + struct qed_common_cb_ops *op = cdev->protocol_ops.common; + struct qed_eth_stats_common *p_common; + struct qed_generic_tlvs gen_tlvs; + struct qed_eth_stats stats; + int i; + + memset(&gen_tlvs, 0, sizeof(gen_tlvs)); + op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); + + if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) + tlv->flags.ipv4_csum_offload = true; + if (gen_tlvs.feat_flags & QED_TLV_LSO) + tlv->flags.lso_supported = true; + tlv->flags.b_set = true; + + for (i = 0; i < QED_TLV_MAC_COUNT; i++) { + if (is_valid_ether_addr(gen_tlvs.mac[i])) { + ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); + tlv->mac_set[i] = true; + } + } + + qed_get_vport_stats(cdev, &stats); + p_common = &stats.common; + tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + + p_common->rx_bcast_pkts; + tlv->rx_frames_set = true; + tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + + p_common->rx_bcast_bytes; + tlv->rx_bytes_set = true; + tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + + p_common->tx_bcast_pkts; + tlv->tx_frames_set = true; + tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + + p_common->tx_bcast_bytes; + tlv->rx_bytes_set = true; +} + int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, union qed_mfw_tlv_data *tlv_buf) { - return -EINVAL; + struct qed_dev *cdev = hwfn->cdev; + struct 
qed_common_cb_ops *ops; + + ops = cdev->protocol_ops.common; + if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { + DP_NOTICE(hwfn, "Can't collect TLV management info\n"); + return -EINVAL; + } + + switch (type) { + case QED_MFW_TLV_GENERIC: + qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); + break; + case QED_MFW_TLV_ETH: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); + break; + case QED_MFW_TLV_FCOE: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); + break; + case QED_MFW_TLV_ISCSI: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); + break; + default: + break; + } + + return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index e80f5e7c7992..2612e3e458d9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1622,6 +1622,8 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_S_TAG_UPDATE: qed_mcp_update_stag(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_GET_TLV_REQ: + qed_mfw_tlv_req(p_hwfn); break; default: DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 92b5352287df..44af652e7407 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -751,6 +751,14 @@ struct qed_int_info { u8 used_cnt; }; +struct qed_generic_tlvs { +#define QED_TLV_IP_CSUM BIT(0) +#define QED_TLV_LSO BIT(1) + u16 feat_flags; +#define QED_TLV_MAC_COUNT 3 + u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN]; +}; + #define QED_NVM_SIGNATURE 0x12435687 enum qed_nvm_flash_cmd { @@ -765,6 +773,8 @@ struct qed_common_cb_ops { void (*link_update)(void *dev, struct qed_link_output *link); void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); + void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); + void (*get_protocol_tlv_data)(void *dev, void *data); }; struct qed_selftest_ops { -- cgit v1.2.3 From d25b859ccd614d2397569b833491372d129d1982 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 22 May 2018 00:28:42 -0700 Subject: qede: Add support for populating ethernet TLVs. This patch adds callbacks for providing the ethernet protocol driver TLVs. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 101 +++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 7abaf2740530..9e70f713c3c5 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -133,6 +133,9 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void qede_remove(struct pci_dev *pdev); static void qede_shutdown(struct pci_dev *pdev); static void qede_link_update(void *dev, struct qed_link_output *link); +static void qede_get_eth_tlv_data(void *edev, void *data); +static void qede_get_generic_tlv_data(void *edev, + struct qed_generic_tlvs *data); /* The qede lock is used to protect driver state change and driver flows that * are not reentrant. 
@@ -228,6 +231,8 @@ static struct qed_eth_cb_ops qede_ll_ops = { .arfs_filter_op = qede_arfs_filter_op, #endif .link_update = qede_link_update, + .get_generic_tlv_data = qede_get_generic_tlv_data, + .get_protocol_tlv_data = qede_get_eth_tlv_data, }, .force_mac = qede_force_mac, .ports_update = qede_udp_ports_update, @@ -2131,3 +2136,99 @@ static void qede_link_update(void *dev, struct qed_link_output *link) } } } + +static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq) +{ + struct netdev_queue *netdev_txq; + + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); + if (netif_xmit_stopped(netdev_txq)) + return true; + + return false; +} + +static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) +{ + struct qede_dev *edev = dev; + struct netdev_hw_addr *ha; + int i; + + if (edev->ndev->features & NETIF_F_IP_CSUM) + data->feat_flags |= QED_TLV_IP_CSUM; + if (edev->ndev->features & NETIF_F_TSO) + data->feat_flags |= QED_TLV_LSO; + + ether_addr_copy(data->mac[0], edev->ndev->dev_addr); + memset(data->mac[1], 0, ETH_ALEN); + memset(data->mac[2], 0, ETH_ALEN); + /* Copy the first two UC macs */ + netif_addr_lock_bh(edev->ndev); + i = 1; + netdev_for_each_uc_addr(ha, edev->ndev) { + ether_addr_copy(data->mac[i++], ha->addr); + if (i == QED_TLV_MAC_COUNT) + break; + } + + netif_addr_unlock_bh(edev->ndev); +} + +static void qede_get_eth_tlv_data(void *dev, void *data) +{ + struct qed_mfw_tlv_eth *etlv = data; + struct qede_dev *edev = dev; + struct qede_fastpath *fp; + int i; + + etlv->lso_maxoff_size = 0XFFFF; + etlv->lso_maxoff_size_set = true; + etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN; + etlv->lso_minseg_size_set = true; + etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC); + etlv->prom_mode_set = true; + etlv->tx_descr_size = QEDE_TSS_COUNT(edev); + etlv->tx_descr_size_set = true; + etlv->rx_descr_size = QEDE_RSS_COUNT(edev); + etlv->rx_descr_size_set = true; + etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB; + etlv->iov_offload_set = true; + + /* Fill information regarding queues; Should be done under the qede + * lock to guarantee those don't change beneath our feet. + */ + etlv->txqs_empty = true; + etlv->rxqs_empty = true; + etlv->num_txqs_full = 0; + etlv->num_rxqs_full = 0; + + __qede_lock(edev); + for_each_queue(i) { + fp = &edev->fp_array[i]; + if (fp->type & QEDE_FASTPATH_TX) { + if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod) + etlv->txqs_empty = false; + if (qede_is_txq_full(edev, fp->txq)) + etlv->num_txqs_full++; + } + if (fp->type & QEDE_FASTPATH_RX) { + if (qede_has_rx_work(fp->rxq)) + etlv->rxqs_empty = false; + + /* This one is a bit tricky; Firmware might stop + * placing packets if ring is not yet full. + * Give an approximation. + */ + if (le16_to_cpu(*fp->rxq->hw_cons_ptr) - + qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) > + RX_RING_SIZE - 100) + etlv->num_rxqs_full++; + } + } + __qede_unlock(edev); + + etlv->txqs_empty_set = true; + etlv->rxqs_empty_set = true; + etlv->num_txqs_full_set = true; + etlv->num_rxqs_full_set = true; +} -- cgit v1.2.3 From 0d45d3fe42efc76b6c4f5a62f8d110c7a2e6f83f Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 15 May 2018 12:08:14 +0200 Subject: mt76x2: apply coverage class on slot time too According to 802.11-2007 17.3.8.6 (slot time), the slot time should be increased by 3 us * coverage class. 
Taking into account coverage class in slot time configuration allows to increase by an order of magnitude the throughput on a 4Km link in a noisy environment Tested-by: Luca Bisti Tested-by: Gaetano Catalli Signed-off-by: Lorenzo Bianconi Acked-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 3 +++ drivers/net/wireless/mediatek/mt76/mt76x2_main.c | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index dd4c1127797e..276c6b6bec95 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -482,7 +482,10 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev) { u8 ackto, sifs, slottime = dev->slottime; + /* As defined by IEEE 802.11-2007 17.3.8.6 */ slottime += 3 * dev->coverage_class; + mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, + MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index 81c58f865c64..539dda8a59e2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -247,8 +247,7 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int slottime = info->use_short_slot ? 9 : 20; dev->slottime = slottime; - mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, - MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); + mt76x2_set_tx_ackto(dev); } mutex_unlock(&dev->mutex); -- cgit v1.2.3 From d98fb328ad103d12c1ebaea92526e4f8670e0fec Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 15 May 2018 14:33:22 +0200 Subject: mt76: fix sending encrypted broadcast packets for secondary interfaces For encryption to work properly, the BSS index needs to be initialized for the WCID entry used for the interface. 
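For readers skimming the diff below: the fix reserves the top eight WCID slots for per-interface group keys and programs each slot's BSS index at MAC reset, so group-addressed frames on secondary interfaces pick up the right key context. The index arithmetic is small enough to show in isolation; this is just the MT_VIF_WCID() mapping from the patch wrapped in a stand-alone program, not driver code:

#include <stdio.h>

/* Interface n (0..7) maps to WCID slot 254 - n, i.e. slots 247..254 are
 * reserved for the group-addressed traffic of up to eight interfaces.
 */
#define MT_VIF_WCID(n) (254 - ((n) & 7))

int main(void)
{
	int n;

	for (n = 0; n < 8; n++)
		printf("vif %d -> WCID %d (BSS index %d)\n", n, MT_VIF_WCID(n), n);
	return 0;
}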
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2.h | 3 +++ drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 3 +++ drivers/net/wireless/mediatek/mt76/mt76x2_main.c | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index a5d1255e4b9c..dc12bbdbb2ee 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -39,6 +39,9 @@ #define MT_CALIBRATE_INTERVAL HZ +#define MT_MAX_VIFS 8 +#define MT_VIF_WCID(_n) (254 - ((_n) & 7)) + #include "mt76.h" #include "mt76x2_regs.h" #include "mt76x2_mac.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 276c6b6bec95..b6f27b949566 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -296,6 +296,9 @@ static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) for (i = 0; i < 256; i++) mt76x2_mac_wcid_setup(dev, i, 0, NULL); + for (i = 0; i < MT_MAX_VIFS; i++) + mt76x2_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL); + for (i = 0; i < 16; i++) for (k = 0; k < 4; k++) mt76x2_mac_shared_key_setup(dev, i, k, NULL); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index 539dda8a59e2..826c2431e31a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -104,7 +104,7 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) idx += 8; mvif->idx = idx; - mvif->group_wcid.idx = 254 - idx; + mvif->group_wcid.idx = MT_VIF_WCID(idx); mvif->group_wcid.hw_key_idx = -1; mt76x2_txq_init(dev, vif->txq); -- cgit v1.2.3 From 66a77cbe63eb326513632cc88e1260e877be8123 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 15 May 2018 14:33:23 +0200 Subject: mt76: discard early received packets if not running yet If the radio was previously in running state, it can receive some packets before it is able to process them. This can lead to a crash if the channel is not initialized yet. Discard all rx packets until start() is called Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_mac.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index dab713756004..b49aea4da2d6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -301,6 +301,9 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, u8 wcid; int len; + if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + return -EINVAL; + if (rxinfo & MT_RXINFO_L2PAD) pad_len += 2; -- cgit v1.2.3 From 89bc67e3a93ae6199ae02c9e4fe41833da9ba368 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 20 May 2018 07:43:45 +0200 Subject: mt76: only stop tx queues on offchannel, not during the entire scan During scans, mac80211 frequently switches back to the home channel to minimize interruption of ongoing traffic. Keep regular tx queues active during that time. 
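The change is easier to follow as a distilled model: the channel-switch path records whether the radio is visiting a foreign channel, and the tx scheduler now backs off only in that case (or during a reset) instead of for the entire scan. The sketch below uses simplified names and plain bit flags rather than the driver's state word; it illustrates the logic and is not mt76 code:

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the state bits involved in the patch below. */
enum {
	STATE_RUNNING    = 1 << 0,
	STATE_SCANNING   = 1 << 1,	/* whole scan, may include the home channel */
	STATE_OFFCHANNEL = 1 << 2,	/* currently on a foreign channel */
	STATE_RESET      = 1 << 3,
};

/* Called on every channel switch: only off-channel visits pause tx. */
static void set_channel(unsigned int *state, bool offchannel)
{
	if (offchannel)
		*state |= STATE_OFFCHANNEL;
	else
		*state &= ~STATE_OFFCHANNEL;
}

/* Before the patch this tested SCANNING, which stalled tx for the whole scan. */
static bool tx_blocked(unsigned int state)
{
	return (state & (STATE_OFFCHANNEL | STATE_RESET)) != 0;
}

int main(void)
{
	unsigned int state = STATE_RUNNING | STATE_SCANNING;

	set_channel(&state, true);	/* visiting a scan channel */
	printf("off-channel:  tx blocked = %d\n", tx_blocked(state));

	set_channel(&state, false);	/* back on the home channel mid-scan */
	printf("home channel: tx blocked = %d\n", tx_blocked(state));
	return 0;
}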
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mac80211.c | 5 +++++ drivers/net/wireless/mediatek/mt76/mt76.h | 1 + drivers/net/wireless/mediatek/mt76/tx.c | 4 ++-- 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 915e61733131..d862e5efd094 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -366,6 +366,11 @@ void mt76_set_channel(struct mt76_dev *dev) struct mt76_channel_state *state; bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL; + if (offchannel) + set_bit(MT76_OFFCHANNEL, &dev->state); + else + clear_bit(MT76_OFFCHANNEL, &dev->state); + if (dev->drv->update_survey) dev->drv->update_survey(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index a74e6eef51e9..2d098fac6147 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -189,6 +189,7 @@ enum { MT76_STATE_RUNNING, MT76_SCANNING, MT76_RESET, + MT76_OFFCHANNEL, }; struct mt76_hw_cap { diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 7ecd2d7c5db4..e96956710fb2 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -332,7 +332,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq, if (probe) break; - if (test_bit(MT76_SCANNING, &dev->state) || + if (test_bit(MT76_OFFCHANNEL, &dev->state) || test_bit(MT76_RESET, &dev->state)) return -EBUSY; @@ -385,7 +385,7 @@ restart: bool empty = false; int cur; - if (test_bit(MT76_SCANNING, &dev->state) || + if (test_bit(MT76_OFFCHANNEL, &dev->state) || test_bit(MT76_RESET, &dev->state)) return -EBUSY; -- cgit v1.2.3 From a164a94212ceb606d40e92db7a58e9dbcb3ff74e Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 20 May 2018 07:43:46 +0200 Subject: mt76: prevent tx scheduling during channel change Re-schedule tx afterwards Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index 826c2431e31a..ce90ff999b49 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -124,11 +124,14 @@ mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) { int ret; + cancel_delayed_work_sync(&dev->cal_work); + + set_bit(MT76_RESET, &dev->mt76.state); + mt76_set_channel(&dev->mt76); tasklet_disable(&dev->pre_tbtt_tasklet); tasklet_disable(&dev->dfs_pd.dfs_tasklet); - cancel_delayed_work_sync(&dev->cal_work); mt76x2_mac_stop(dev, true); ret = mt76x2_phy_set_channel(dev, chandef); @@ -143,6 +146,10 @@ mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) tasklet_enable(&dev->dfs_pd.dfs_tasklet); tasklet_enable(&dev->pre_tbtt_tasklet); + clear_bit(MT76_RESET, &dev->mt76.state); + + mt76_txq_schedule_all(&dev->mt76); + return ret; } @@ -452,7 +459,6 @@ mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) clear_bit(MT76_SCANNING, &dev->mt76.state); tasklet_enable(&dev->pre_tbtt_tasklet); - mt76_txq_schedule_all(&dev->mt76); } static void -- cgit v1.2.3 From 
a85b590cf55f0789efcdd347b69c9e8ad6c3dcc7 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 20 May 2018 07:43:47 +0200 Subject: mt76: move ieee80211_hw allocation to common core Allows it to be shared between different drivers and locks to be initialized earlier Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mac80211.c | 22 ++++++++++++++++++++-- drivers/net/wireless/mediatek/mt76/mt76.h | 2 ++ drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 14 ++++++-------- 3 files changed, 28 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index d862e5efd094..d1044e5b25db 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -268,6 +268,26 @@ mt76_check_sband(struct mt76_dev *dev, int band) dev->hw->wiphy->bands[band] = NULL; } +struct mt76_dev * +mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops) +{ + struct ieee80211_hw *hw; + struct mt76_dev *dev; + + hw = ieee80211_alloc_hw(size, ops); + if (!hw) + return NULL; + + dev = hw->priv; + dev->hw = hw; + spin_lock_init(&dev->rx_lock); + spin_lock_init(&dev->lock); + spin_lock_init(&dev->cc_lock); + + return dev; +} +EXPORT_SYMBOL_GPL(mt76_alloc_device); + int mt76_register_device(struct mt76_dev *dev, bool vht, struct ieee80211_rate *rates, int n_rates) { @@ -277,8 +297,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, dev_set_drvdata(dev->dev, dev); - spin_lock_init(&dev->lock); - spin_lock_init(&dev->cc_lock); INIT_LIST_HEAD(&dev->txwi_cache); SET_IEEE80211_DEV(hw, dev->dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 2d098fac6147..bb158f867d7c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -377,6 +377,8 @@ mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c) return &msband->chan[idx]; } +struct mt76_dev *mt76_alloc_device(unsigned int size, + const struct ieee80211_ops *ops); int mt76_register_device(struct mt76_dev *dev, bool vht, struct ieee80211_rate *rates, int n_rates); void mt76_unregister_device(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index b6f27b949566..ec1715639a04 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -638,20 +638,18 @@ struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev) .rx_poll_complete = mt76x2_rx_poll_complete, .sta_ps = mt76x2_sta_ps, }; - struct ieee80211_hw *hw; struct mt76x2_dev *dev; + struct mt76_dev *mdev; - hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x2_ops); - if (!hw) + mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops); + if (!mdev) return NULL; - dev = hw->priv; - dev->mt76.dev = pdev; - dev->mt76.hw = hw; - dev->mt76.drv = &drv_ops; + dev = container_of(mdev, struct mt76x2_dev, mt76); + mdev->dev = pdev; + mdev->drv = &drv_ops; mutex_init(&dev->mutex); spin_lock_init(&dev->irq_lock); - spin_lock_init(&dev->mt76.rx_lock); return dev; } -- cgit v1.2.3 From 26e40d4c0b52c60f13b074d9d6144eb3ac7fb61b Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 20 May 2018 07:43:48 +0200 Subject: mt76: wait for pending tx to complete before switching channel Reduces interruption caused by scanning Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- 
drivers/net/wireless/mediatek/mt76/dma.c | 4 ++++ drivers/net/wireless/mediatek/mt76/mac80211.c | 16 ++++++++++++++++ drivers/net/wireless/mediatek/mt76/mt76.h | 2 ++ 3 files changed, 22 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 3518703524e7..3dbedcedc2c4 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -178,6 +178,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) mt76_dma_sync_idx(dev, q); wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + + if (!q->queued) + wake_up(&dev->tx_wait); + spin_unlock_bh(&q->lock); if (wake) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index d1044e5b25db..fcd079a96782 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -283,6 +283,7 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops) spin_lock_init(&dev->rx_lock); spin_lock_init(&dev->lock); spin_lock_init(&dev->cc_lock); + init_waitqueue_head(&dev->tx_wait); return dev; } @@ -377,18 +378,33 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) } EXPORT_SYMBOL_GPL(mt76_rx); +static bool mt76_has_tx_pending(struct mt76_dev *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) { + if (dev->q_tx[i].queued) + return true; + } + + return false; +} + void mt76_set_channel(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw; struct cfg80211_chan_def *chandef = &hw->conf.chandef; struct mt76_channel_state *state; bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL; + int timeout = HZ / 5; if (offchannel) set_bit(MT76_OFFCHANNEL, &dev->state); else clear_bit(MT76_OFFCHANNEL, &dev->state); + wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout); + if (dev->drv->update_survey) dev->drv->update_survey(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index bb158f867d7c..d2166fbf50ff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -251,6 +251,8 @@ struct mt76_dev { struct mt76_queue q_rx[__MT_RXQ_MAX]; const struct mt76_queue_ops *queue_ops; + wait_queue_head_t tx_wait; + u8 macaddr[ETH_ALEN]; u32 rev; unsigned long state; -- cgit v1.2.3 From cbec83d40cc79137b008d0dd44846c5f2146a83d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 20 May 2018 07:43:49 +0200 Subject: mt76: use udelay instead of usleep_range in mt76x2_mac_stop usleep_range can cause excessive latency on channel change if waiting for the MAC to stop fails. It will be forced to stop by the code following that loop anyway. 
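The trade-off is worth spelling out: usleep_range() puts the task to sleep, so every failed poll costs the requested 10 to 20 microseconds plus scheduler wake-up latency, which adds up when the MAC refuses to stop; udelay(1) busy-waits, so N polls cost roughly N microseconds in the worst case. Below is a generic sketch of the bounded-poll pattern being tuned; my_dev, my_read_status() and MY_STATUS_BUSY are invented placeholders, only udelay() is the real kernel API:

#include <linux/delay.h>
#include <linux/types.h>

struct my_dev;					/* placeholder, not an mt76x2 type */
u32 my_read_status(struct my_dev *dev);		/* placeholder register read */
#define MY_STATUS_BUSY 0x1			/* placeholder status bit */

/* Poll a status register a bounded number of times; if the hardware never
 * goes idle the caller forces it to stop anyway, so the loop only needs a
 * short, deterministic per-iteration delay rather than a sleep.
 */
static bool wait_for_idle(struct my_dev *dev, int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		if (!(my_read_status(dev) & MY_STATUS_BUSY))
			return true;	/* hardware went idle */
		udelay(1);		/* bounded busy-wait per iteration */
	}

	return false;
}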
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index ec1715639a04..79ab93613e06 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -376,7 +376,7 @@ void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force) if ((mt76_rr(dev, MT_MAC_STATUS) & (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) || mt76_rr(dev, MT_BBP(IBI, 12))) { - usleep_range(10, 20); + udelay(1); continue; } -- cgit v1.2.3 From 30bfce0b63fa68c14ae1613eb9d259fa18644074 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Fri, 18 May 2018 15:38:54 +0800 Subject: mwifiex: correct histogram data with appropriate index Correct snr/nr/rssi data index to avoid possible buffer underflow. Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/util.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 0cd68ffc2c74..51ccf10f4413 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -708,12 +708,14 @@ void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr, s8 nflr) { struct mwifiex_histogram_data *phist_data = priv->hist_data; + s8 nf = -nflr; + s8 rssi = snr - nflr; atomic_inc(&phist_data->num_samples); atomic_inc(&phist_data->rx_rate[rx_rate]); - atomic_inc(&phist_data->snr[snr]); - atomic_inc(&phist_data->noise_flr[128 + nflr]); - atomic_inc(&phist_data->sig_str[nflr - snr]); + atomic_inc(&phist_data->snr[snr + 128]); + atomic_inc(&phist_data->noise_flr[nf + 128]); + atomic_inc(&phist_data->sig_str[rssi + 128]); } /* function to reset histogram data during init/reset */ -- cgit v1.2.3 From 88001968245c42c26416476bf0ef960442371605 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Mon, 14 May 2018 08:48:20 +0200 Subject: brcmfmac: add debugfs entry for reading firmware capabilities MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows reading all capabilities as reported by a firmware. They are printed using native (raw) names, just like developers like it the most. It's how firmware reports support for various features, e.g. supported modes, supported standards, power saving details, max BSS-es. Access to all that info is useful for trying new firmwares, comparing them and debugging features AKA bugs. Signed-off-by: Rafał Miłecki Reviewed-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/feature.c | 36 ++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 876731c57bf5..800a423c7bc2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -165,6 +165,41 @@ static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp) } } +/** + * brcmf_feat_fwcap_debugfs_read() - expose firmware capabilities to debugfs. + * + * @seq: sequence for debugfs entry. + * @data: raw data pointer. 
+ */ +static int brcmf_feat_fwcap_debugfs_read(struct seq_file *seq, void *data) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); + struct brcmf_if *ifp = brcmf_get_ifp(bus_if->drvr, 0); + char caps[MAX_CAPS_BUFFER_SIZE + 1] = { }; + char *tmp; + int err; + + err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps)); + if (err) { + brcmf_err("could not get firmware cap (%d)\n", err); + return err; + } + + /* Put every capability in a new line */ + for (tmp = caps; *tmp; tmp++) { + if (*tmp == ' ') + *tmp = '\n'; + } + + /* Usually there is a space at the end of capabilities string */ + seq_printf(seq, "%s", caps); + /* So make sure we don't print two line breaks */ + if (tmp > caps && *(tmp - 1) != '\n') + seq_printf(seq, "\n"); + + return 0; +} + void brcmf_feat_attach(struct brcmf_pub *drvr) { struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); @@ -233,6 +268,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) void brcmf_feat_debugfs_create(struct brcmf_pub *drvr) { brcmf_debugfs_add_entry(drvr, "features", brcmf_feat_debugfs_read); + brcmf_debugfs_add_entry(drvr, "fwcap", brcmf_feat_fwcap_debugfs_read); } bool brcmf_feat_is_enabled(struct brcmf_if *ifp, enum brcmf_feat_id id) -- cgit v1.2.3 From abd39c6ded9db53aa44c2540092bdd5fb6590fa8 Mon Sep 17 00:00:00 2001 From: Sanjay Konduri Date: Tue, 15 May 2018 14:34:30 +0530 Subject: rsi: add fix for crash during assertions Observed crash in some scenarios when assertion has occurred, this is because hw structure is freed and is tried to get accessed in some functions where null check is already present. So, avoided the crash by making the hw to NULL after freeing. Signed-off-by: Sanjay Konduri Signed-off-by: Sushant Kumar Mishra Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 3faa0449a5ef..bfa7569c85bb 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -245,6 +245,7 @@ void rsi_mac80211_detach(struct rsi_hw *adapter) ieee80211_stop_queues(hw); ieee80211_unregister_hw(hw); ieee80211_free_hw(hw); + adapter->hw = NULL; } for (band = 0; band < NUM_NL80211_BANDS; band++) { -- cgit v1.2.3 From eeed833aaa38712efc7c41d9d6bd5afc8c13c55b Mon Sep 17 00:00:00 2001 From: Sanjay Konduri Date: Tue, 15 May 2018 14:34:31 +0530 Subject: rsi: add fix for corruption of auto rate table Auto rate table sent to firmware is getting corrupted as memset to zeros is not done. Added memset to skb data before filling auto rate table. 
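The bug class generalises beyond rsi: a buffer that is not zeroed after allocation keeps whatever bytes were there before, so every table slot or padding byte the code never explicitly writes reaches the firmware as garbage. A stand-alone illustration of why the memset before filling matters (plain user-space C, nothing rsi-specific):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rate_table {		/* stand-in for the frame sent to firmware */
	unsigned short rates[16];
	unsigned short flags;
};

static void fill_first_rates(struct rate_table *t)
{
	/* Only part of the table is populated by the caller... */
	t->rates[0] = 0x6;
	t->rates[1] = 0xc;
	/* ...everything else stays whatever the allocator handed back. */
}

int main(void)
{
	struct rate_table *t = malloc(sizeof(*t));

	if (!t)
		return 1;

	/* Without this line rates[2..15] and flags are undefined garbage. */
	memset(t, 0, sizeof(*t));

	fill_first_rates(t);
	printf("rates[5] = %u, flags = %u\n", t->rates[5], t->flags);
	free(t);
	return 0;
}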
Signed-off-by: Sanjay Konduri Signed-off-by: Sushant Kumar Mishra Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 0757adcb3f4a..d0e5937cad6d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1190,6 +1190,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common, return -ENOMEM; } + memset(skb->data, 0, frame_len); selected_rates = kzalloc(2 * RSI_TBL_SZ, GFP_KERNEL); if (!selected_rates) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n", -- cgit v1.2.3 From d4e36e5554eb92f3ec7fedad3efb602570584df4 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Fri, 20 Apr 2018 13:49:25 +0300 Subject: mac80211: Support adding duration for prepare_tx() callback There are specific cases, such as SAE authentication exchange, that might require long duration to complete. For such cases, add support for indicating to the driver the required duration of the prepare_tx() operation, so the driver would still be able to complete the frame exchange. Currently, indicate the duration only for SAE authentication exchange, as SAE authentication can take up to 2000 msec (as defined in IEEE P802.11-REVmd D1.0 p. 3504). As the patch modified the prepare_tx() callback API, also modify the relevant code in iwlwifi. Signed-off-by: Ilan Peer Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath9k/main.c | 3 ++- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 6 +++++- include/net/mac80211.h | 5 ++++- net/mac80211/driver-ops.h | 8 +++++--- net/mac80211/mlme.c | 17 +++++++++------ net/mac80211/trace.h | 25 ++++++++++++++++++++--- 6 files changed, 49 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a3be8add56e1..b6663c80e7dd 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2544,7 +2544,8 @@ static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw, } static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + u16 duration) { struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5f0701c992a4..ec91dd90acfd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2844,7 +2844,8 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, } static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + u16 req_duration) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; @@ -2857,6 +2858,9 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX)) return; + if (req_duration > duration) + duration = req_duration; + mutex_lock(&mvm->mutex); /* Try really hard to protect the session and hear a beacon */ iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 604d738a2128..851a5e19ae32 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3378,6 
+3378,8 @@ enum ieee80211_reconfig_type { * frame in case that no beacon was heard from the AP/P2P GO. * The callback will be called before each transmission and upon return * mac80211 will transmit the frame right away. + * If duration is greater than zero, mac80211 hints to the driver the + * duration for which the operation is requested. * The callback is optional and can (should!) sleep. * * @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending @@ -3697,7 +3699,8 @@ struct ieee80211_ops { u32 sset, u8 *data); void (*mgd_prepare_tx)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + u16 duration); void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 4d82fe7d627c..8f6998091d26 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -2,6 +2,7 @@ /* * Portions of this file * Copyright(c) 2016 Intel Deutschland GmbH +* Copyright (C) 2018 Intel Corporation */ #ifndef __MAC80211_DRIVER_OPS @@ -813,7 +814,8 @@ drv_allow_buffered_frames(struct ieee80211_local *local, } static inline void drv_mgd_prepare_tx(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata) + struct ieee80211_sub_if_data *sdata, + u16 duration) { might_sleep(); @@ -821,9 +823,9 @@ static inline void drv_mgd_prepare_tx(struct ieee80211_local *local, return; WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); - trace_drv_mgd_prepare_tx(local, sdata); + trace_drv_mgd_prepare_tx(local, sdata, duration); if (local->ops->mgd_prepare_tx) - local->ops->mgd_prepare_tx(&local->hw, &sdata->vif); + local->ops->mgd_prepare_tx(&local->hw, &sdata->vif, duration); trace_drv_return_void(local); } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 233068756502..a59187c016e0 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -864,7 +864,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) return; } - drv_mgd_prepare_tx(local, sdata); + drv_mgd_prepare_tx(local, sdata, 0); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) @@ -2022,7 +2022,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, */ if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP) && !ifmgd->have_beacon) - drv_mgd_prepare_tx(sdata->local, sdata); + drv_mgd_prepare_tx(sdata->local, sdata, 0); ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype, reason, tx, frame_buf); @@ -2560,7 +2560,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, if (!elems.challenge) return; auth_data->expected_transaction = 4; - drv_mgd_prepare_tx(sdata->local, sdata); + drv_mgd_prepare_tx(sdata->local, sdata, 0); if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; @@ -3769,6 +3769,7 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) u32 tx_flags = 0; u16 trans = 1; u16 status = 0; + u16 prepare_tx_duration = 0; sdata_assert_lock(sdata); @@ -3790,7 +3791,11 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) return -ETIMEDOUT; } - drv_mgd_prepare_tx(local, sdata); + if (auth_data->algorithm == WLAN_AUTH_SAE) + prepare_tx_duration = + jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE); + + drv_mgd_prepare_tx(local, sdata, prepare_tx_duration); sdata_info(sdata, "send auth to %pM (try %d/%d)\n", auth_data->bss->bssid, 
auth_data->tries, @@ -4994,7 +4999,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, req->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); - drv_mgd_prepare_tx(sdata->local, sdata); + drv_mgd_prepare_tx(sdata->local, sdata, 0); ieee80211_send_deauth_disassoc(sdata, req->bssid, IEEE80211_STYPE_DEAUTH, req->reason_code, tx, @@ -5014,7 +5019,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, req->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); - drv_mgd_prepare_tx(sdata->local, sdata); + drv_mgd_prepare_tx(sdata->local, sdata, 0); ieee80211_send_deauth_disassoc(sdata, req->bssid, IEEE80211_STYPE_DEAUTH, req->reason_code, tx, diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 591ad02e1fa4..80a7edf8d314 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -2,6 +2,7 @@ /* * Portions of this file * Copyright(c) 2016 Intel Deutschland GmbH +* Copyright (C) 2018 Intel Corporation */ #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) @@ -1413,11 +1414,29 @@ DEFINE_EVENT(release_evt, drv_allow_buffered_frames, TP_ARGS(local, sta, tids, num_frames, reason, more_data) ); -DEFINE_EVENT(local_sdata_evt, drv_mgd_prepare_tx, +TRACE_EVENT(drv_mgd_prepare_tx, TP_PROTO(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata), + struct ieee80211_sub_if_data *sdata, + u16 duration), - TP_ARGS(local, sdata) + TP_ARGS(local, sdata, duration), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u32, duration) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->duration = duration; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " duration: %u", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->duration + ) ); DEFINE_EVENT(local_sdata_evt, drv_mgd_protect_tdls_discover, -- cgit v1.2.3 From f8793c26fe586659d6da3fa277e63961a69d314b Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Wed, 16 May 2018 14:11:58 +0200 Subject: brcmfmac: move ALLFFMAC variable in flowring module The only user of ALLFFMAC is the flowring module so no need to expose it in a header file. 
Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 2 -- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h | 2 -- drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c | 2 ++ 3 files changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 105b8774fca9..cd3651069d0c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -36,8 +36,6 @@ MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); MODULE_LICENSE("Dual BSD/GPL"); -const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - #define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index ef914619e8e1..a34642cb4d2f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -19,8 +19,6 @@ #include #include "fwil_types.h" -extern const u8 ALLFFMAC[ETH_ALEN]; - #define BRCMF_FW_ALTPATH_LEN 256 /* Definitions for the module global and device specific settings are defined diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c index d0b738da2458..d0d8b32af7d0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c @@ -46,6 +46,8 @@ static const u8 brcmf_flowring_prio2fifo[] = { 3 }; +static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + static bool brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN]) -- cgit v1.2.3 From 8e072168f75ebce85b96cbcefea2b10ddbd5913f Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Wed, 16 May 2018 14:11:59 +0200 Subject: brcmfmac: add support for sysfs initiated coredump The driver already supports device coredump initiated by firmware event. Since commit 3c47d19ff4dc ("drivers: base: add coredump driver ops") it is also possible to initiate it from user-space through sysfs. This patch adds support for SDIO and PCIe devices. 
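For context on the mechanism being hooked up: once a driver fills in the .coredump member of its struct device_driver, the driver core exposes a writable 'coredump' attribute in the bound device's sysfs directory, and writing to that file invokes the callback, which typically captures device state and hands it to the devcoredump framework. A minimal, hypothetical sketch of that wiring follows; the "mydump" platform driver and its fake dump payload are invented for illustration, while the .coredump hook, dev_coredumpv() and the platform_driver boilerplate are real kernel interfaces:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/devcoredump.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

static void mydump_coredump(struct device *dev)
{
	static const char msg[] = "example device state\n";
	char *data = vmalloc(sizeof(msg));	/* devcoredump expects vmalloc'ed data */

	if (!data)
		return;
	memcpy(data, msg, sizeof(msg));

	/* devcoredump takes ownership of the buffer and exposes it under
	 * /sys/class/devcoredump until user space reads or discards it.
	 */
	dev_coredumpv(dev, data, sizeof(msg), GFP_KERNEL);
}

static int mydump_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mydump_driver = {
	.probe = mydump_probe,
	.driver = {
		.name = "mydump",
		/* this is what makes the per-device 'coredump' sysfs file appear */
		.coredump = mydump_coredump,
	},
};
module_platform_driver(mydump_driver);

MODULE_LICENSE("Dual BSD/GPL");

Writing anything to that per-device coredump file from user space then runs mydump_coredump() and a new entry should show up under /sys/class/devcoredump, which is the same flow the brcmfmac SDIO and PCIe hooks below rely on.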
Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 1 + drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 2 ++ drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 8 ++++++++ drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 1 + 4 files changed, 12 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index a1915411c280..d2f788d88668 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1165,6 +1165,7 @@ static struct sdio_driver brcmf_sdmmc_driver = { #ifdef CONFIG_PM_SLEEP .pm = &brcmf_sdio_pm_ops, #endif /* CONFIG_PM_SLEEP */ + .coredump = brcmf_dev_coredump, }, }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 27e693e93f21..c4965184cdf3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -250,6 +250,8 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings); void brcmf_detach(struct device *dev); /* Indication from bus module that dongle should be reset */ void brcmf_dev_reset(struct device *dev); +/* Request from bus module to initiate a coredump */ +void brcmf_dev_coredump(struct device *dev); /* Configure the "global" bus state used by upper layers */ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 8d4511eaa9b9..72954fd6df3b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1180,6 +1180,14 @@ void brcmf_dev_reset(struct device *dev) brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1); } +void brcmf_dev_coredump(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + + if (brcmf_debug_create_memdump(bus_if, NULL, 0) < 0) + brcmf_dbg(TRACE, "failed to create coredump\n"); +} + void brcmf_detach(struct device *dev) { s32 i; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index f0797aeada67..5baa8372371d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -2044,6 +2044,7 @@ static struct pci_driver brcmf_pciedrvr = { #ifdef CONFIG_PM .driver.pm = &brcmf_pciedrvr_pm, #endif + .driver.coredump = brcmf_dev_coredump, }; -- cgit v1.2.3 From 21c5c83ce83359f0ab930824c67984575c051550 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Wed, 16 May 2018 14:12:00 +0200 Subject: mwifiex: support sysfs initiated device coredump Since commit 3c47d19ff4dc ("drivers: base: add coredump driver ops") it is possible to initiate a device coredump from user-space. This patch adds support for it adding the .coredump() driver callback. As there is no longer a need to initiate it through debugfs remove that code. 
Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 1 + drivers/net/wireless/marvell/mwifiex/debugfs.c | 31 +------------------------- drivers/net/wireless/marvell/mwifiex/pcie.c | 18 +++++++++++++-- drivers/net/wireless/marvell/mwifiex/sdio.c | 12 ++++++++++ drivers/net/wireless/marvell/mwifiex/usb.c | 13 +++++++++++ 5 files changed, 43 insertions(+), 32 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 9cfcdf6bec52..8be1e69657a7 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -674,6 +674,7 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, return ret; } +EXPORT_SYMBOL_GPL(mwifiex_send_cmd); /* * This function queues a command to the command pending queue. diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index db2872daae97..07453932f703 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -153,34 +153,6 @@ free_and_exit: return ret; } -/* - * Proc device dump read handler. - * - * This function is called when the 'device_dump' file is opened for - * reading. - * This function dumps driver information and firmware memory segments - * (ex. DTCM, ITCM, SQRAM etc.) for - * debugging. - */ -static ssize_t -mwifiex_device_dump_read(struct file *file, char __user *ubuf, - size_t count, loff_t *ppos) -{ - struct mwifiex_private *priv = file->private_data; - - /* For command timeouts, USB firmware will automatically emit - * firmware dump events, so we don't implement device_dump(). - * For user-initiated dumps, we trigger it ourselves. - */ - if (priv->adapter->iface_type == MWIFIEX_USB) - mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT, - HostCmd_ACT_GEN_SET, 0, NULL, true); - else - priv->adapter->if_ops.device_dump(priv->adapter); - - return 0; -} - /* * Proc getlog file read handler. 
* @@ -980,7 +952,6 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \ MWIFIEX_DFS_FILE_READ_OPS(info); MWIFIEX_DFS_FILE_READ_OPS(debug); MWIFIEX_DFS_FILE_READ_OPS(getlog); -MWIFIEX_DFS_FILE_READ_OPS(device_dump); MWIFIEX_DFS_FILE_OPS(regrdwr); MWIFIEX_DFS_FILE_OPS(rdeeprom); MWIFIEX_DFS_FILE_OPS(memrw); @@ -1011,7 +982,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(getlog); MWIFIEX_DFS_ADD_FILE(regrdwr); MWIFIEX_DFS_ADD_FILE(rdeeprom); - MWIFIEX_DFS_ADD_FILE(device_dump); + MWIFIEX_DFS_ADD_FILE(memrw); MWIFIEX_DFS_ADD_FILE(hscfg); MWIFIEX_DFS_ADD_FILE(histogram); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 7538543d46fa..0c42b7296ddd 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -320,6 +320,19 @@ static void mwifiex_pcie_shutdown(struct pci_dev *pdev) return; } +static void mwifiex_pcie_coredump(struct device *dev) +{ + struct pci_dev *pdev; + struct pcie_service_card *card; + + pdev = container_of(dev, struct pci_dev, dev); + card = pci_get_drvdata(pdev); + + if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, + &card->work_flags)) + schedule_work(&card->work); +} + static const struct pci_device_id mwifiex_ids[] = { { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P, @@ -415,11 +428,12 @@ static struct pci_driver __refdata mwifiex_pcie = { .id_table = mwifiex_ids, .probe = mwifiex_pcie_probe, .remove = mwifiex_pcie_remove, -#ifdef CONFIG_PM_SLEEP .driver = { + .coredump = mwifiex_pcie_coredump, +#ifdef CONFIG_PM_SLEEP .pm = &mwifiex_pcie_pm_ops, - }, #endif + }, .shutdown = mwifiex_pcie_shutdown, .err_handler = &mwifiex_pcie_err_handler, }; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index a82880132af4..47d2dcc3f28f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -466,6 +466,17 @@ static int mwifiex_sdio_suspend(struct device *dev) return ret; } +static void mwifiex_sdio_coredump(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct sdio_mmc_card *card; + + card = sdio_get_drvdata(func); + if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, + &card->work_flags)) + schedule_work(&card->work); +} + /* Device ID for SD8786 */ #define SDIO_DEVICE_ID_MARVELL_8786 (0x9116) /* Device ID for SD8787 */ @@ -515,6 +526,7 @@ static struct sdio_driver mwifiex_sdio = { .remove = mwifiex_sdio_remove, .drv = { .owner = THIS_MODULE, + .coredump = mwifiex_sdio_coredump, .pm = &mwifiex_sdio_pm_ops, } }; diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 4bc244801636..7aa39878ce49 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -653,6 +653,16 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) usb_put_dev(interface_to_usbdev(intf)); } +static void mwifiex_usb_coredump(struct device *dev) +{ + struct usb_interface *intf = to_usb_interface(dev); + struct usb_card_rec *card = usb_get_intfdata(intf); + + mwifiex_send_cmd(mwifiex_get_priv(card->adapter, MWIFIEX_BSS_ROLE_ANY), + HostCmd_CMD_FW_DUMP_EVENT, HostCmd_ACT_GEN_SET, 0, + NULL, true); +} + static struct usb_driver mwifiex_usb_driver = { .name = "mwifiex_usb", .probe = mwifiex_usb_probe, @@ -661,6 +671,9 @@ static struct usb_driver mwifiex_usb_driver = { .suspend = mwifiex_usb_suspend, 
.resume = mwifiex_usb_resume, .soft_unbind = 1, + .drvwrap.driver = { + .coredump = mwifiex_usb_coredump, + }, }; static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, -- cgit v1.2.3 From d2af9b566554e01f9ad67b330ce569dbc130e5d3 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Wed, 16 May 2018 14:12:01 +0200 Subject: brcmfmac: validate user provided data for memdump before copying In the patch "brcmfmac: add support for sysfs initiated coredump", a new scenario of brcmf_debug_create_memdump was added in which the user of the function might not necessarily provide prefix data. Hence the function should not assume the data is always valid and should perform a check before copying. Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index 504832084eca..489b5dfdf5b9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -40,7 +40,8 @@ int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, if (!dump) return -ENOMEM; - memcpy(dump, data, len); + if (data && len > 0) + memcpy(dump, data, len); err = brcmf_bus_get_memdump(bus, dump + len, ramsize); if (err) { vfree(dump); -- cgit v1.2.3 From 8a3ab2f38f1669e3be6433a1f6b82a077b38c4c7 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Wed, 16 May 2018 14:12:02 +0200 Subject: brcmfmac: trigger memory dump upon firmware halt signal PCIe dongle firmware signals a halt/trap through a mailbox interrupt. Triggering a memory dump upon receiving such a signal can provide useful information for debugging the issue. Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 5baa8372371d..45928b5b8d97 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -182,6 +182,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_D2H_DEV_D3_ACK 0x00000001 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004 +#define BRCMF_D2H_DEV_FWHALT 0x10000000 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001 #define BRCMF_H2D_HOST_DS_ACK 0x00000002 @@ -717,6 +718,10 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo) devinfo->mbdata_completed = true; wake_up(&devinfo->mbdata_resp_wait); } + if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) { + brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n"); + brcmf_dev_coredump(&devinfo->pdev->dev); + } } -- cgit v1.2.3 From b8248236e92790ac635caeb4156e46ea2417e037 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Wed, 16 May 2018 14:12:03 +0200 Subject: brcmfmac: trigger memory dump on SDIO firmware halt message Attempt to dump dongle memory for debugging upon receiving a firmware halt message through the dongle-to-host mailbox interrupt.
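Taken together with the sysfs coredump support added earlier in this series, these firmware-halt changes form a single path: the halt bit in the device-to-host mailbox data funnels into the same bus-agnostic brcmf_dev_coredump() entry point the sysfs attribute uses, and that path passes no prefix data to brcmf_debug_create_memdump(), which is why the NULL/length check above was needed. A minimal sketch of the hand-off (illustrative only, not part of the patch, assuming the bus.h declaration from this series):

#include "bus.h"	/* brcmf_dev_coredump() declaration added in this series */

/* called from the bus-specific mailbox/interrupt handler on a firmware halt */
static void example_on_fw_halt(struct device *dev)
{
	/* firmware-initiated dumps reuse the sysfs coredump entry point */
	brcmf_dev_coredump(dev);
}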
Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 412a05b9a2b2..c99a191e8d69 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -1072,8 +1072,10 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus) bus->sdcnt.f1regdata += 2; /* dongle indicates the firmware has halted/crashed */ - if (hmb_data & HMB_DATA_FWHALT) + if (hmb_data & HMB_DATA_FWHALT) { brcmf_err("mailbox indicates firmware halted\n"); + brcmf_dev_coredump(&sdiod->func1->dev); + } /* Dongle recomposed rx frames, accept them again */ if (hmb_data & HMB_DATA_NAKHANDLED) { -- cgit v1.2.3 From 8f6196f63c46982c095e485a9c73c683d9900a2e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:43 -0700 Subject: nfp: move rtsym helpers to pf code nfp_net_pf_rtsym_read_optional() and nfp_net_pf_map_rtsym() are not really related to networking code. Move them to the PF code and remove the net from their names. They will soon be needed by code outside of nfp_net_main.c anyway. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 32 ++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_main.h | 6 +++ drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 61 +++++------------------ 3 files changed, 51 insertions(+), 48 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 0ade122805ad..1ef3623c6e1c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -75,6 +75,38 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); +int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, + unsigned int default_val) +{ + char name[256]; + int err = 0; + u64 val; + + snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); + + val = nfp_rtsym_read_le(pf->rtbl, name, &err); + if (err) { + if (err == -ENOENT) + return default_val; + nfp_err(pf->cpp, "Unable to read symbol %s\n", name); + return err; + } + + return val; +} + +u8 __iomem * +nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int min_size, struct nfp_cpp_area **area) +{ + char pf_symbol[256]; + + snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, + nfp_cppcore_pcie_unit(pf->cpp)); + + return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); +} + static bool nfp_board_ready(struct nfp_pf *pf) { const char *cp; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 42211083b51f..5b028486c894 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -177,6 +177,12 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); +int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, + unsigned int default_val); +u8 __iomem * +nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int min_size, struct nfp_cpp_area 
**area); + enum nfp_dump_diag { NFP_DUMP_NSP_DIAG = 0, }; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 45cd2092e498..f8abb4cd9cef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -101,48 +101,15 @@ nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index) return NULL; } -static int -nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, - unsigned int default_val) -{ - char name[256]; - int err = 0; - u64 val; - - snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); - - val = nfp_rtsym_read_le(pf->rtbl, name, &err); - if (err) { - if (err == -ENOENT) - return default_val; - nfp_err(pf->cpp, "Unable to read symbol %s\n", name); - return err; - } - - return val; -} - static int nfp_net_pf_get_num_ports(struct nfp_pf *pf) { - return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1); + return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1); } static int nfp_net_pf_get_app_id(struct nfp_pf *pf) { - return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id", - NFP_APP_CORE_NIC); -} - -static u8 __iomem * -nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, - unsigned int min_size, struct nfp_cpp_area **area) -{ - char pf_symbol[256]; - - snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, - nfp_cppcore_pcie_unit(pf->cpp)); - - return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); + return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id", + NFP_APP_CORE_NIC); } static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn) @@ -379,9 +346,8 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) if (!nfp_app_needs_ctrl_vnic(pf->app)) return 0; - ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar", - NFP_PF_CSR_SLICE_SIZE, - &pf->ctrl_vnic_bar); + ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar", + NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar); if (IS_ERR(ctrl_bar)) { nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n"); err = PTR_ERR(ctrl_bar); @@ -507,8 +473,8 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) int err; min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE; - mem = nfp_net_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0", - min_size, &pf->data_vnic_bar); + mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0", + min_size, &pf->data_vnic_bar); if (IS_ERR(mem)) { nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); return PTR_ERR(mem); @@ -528,10 +494,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) } } - pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg", - "_pf%d_net_vf_bar", - NFP_NET_CFG_BAR_SZ * - pf->limit_vfs, &pf->vf_cfg_bar); + pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar", + NFP_NET_CFG_BAR_SZ * pf->limit_vfs, + &pf->vf_cfg_bar); if (IS_ERR(pf->vf_cfg_mem)) { if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) { err = PTR_ERR(pf->vf_cfg_mem); @@ -541,9 +506,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) } min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ; - pf->vfcfg_tbl2 = nfp_net_pf_map_rtsym(pf, "net.vfcfg_tbl2", - "_pf%d_net_vf_cfg2", - min_size, &pf->vfcfg_tbl2_area); + pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2", + "_pf%d_net_vf_cfg2", + min_size, &pf->vfcfg_tbl2_area); if (IS_ERR(pf->vfcfg_tbl2)) { if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) { err = PTR_ERR(pf->vfcfg_tbl2); -- 
cgit v1.2.3 From 0c693323a1e4ba129c7ea4b9b2679eb10227a446 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:44 -0700 Subject: nfp: add support for per-PCI PF mailbox When working with devlink-related functionality for locking reasons it's easier to create a new mailbox per-PCI PF device than try to use one of the netdev/vNIC mailboxes. Define new mailbox structure and resolve its symbol during probe. For forward compatibility allow silent truncation of mailbox command data. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_abi.h | 59 ++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_main.c | 108 ++++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_main.h | 6 ++ 3 files changed, 173 insertions(+) create mode 100644 drivers/net/ethernet/netronome/nfp/nfp_abi.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h new file mode 100644 index 000000000000..ce3935d6729b --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __NFP_ABI__ +#define __NFP_ABI__ 1 + +#include + +#define NFP_MBOX_SYM_NAME "_abi_nfd_pf%u_mbox" +#define NFP_MBOX_SYM_MIN_SIZE 16 /* When no data needed */ + +#define NFP_MBOX_CMD 0x00 +#define NFP_MBOX_RET 0x04 +#define NFP_MBOX_DATA_LEN 0x08 +#define NFP_MBOX_RESERVED 0x0c +#define NFP_MBOX_DATA 0x10 + +/** + * enum nfp_mbox_cmd - PF mailbox commands + * + * @NFP_MBOX_NO_CMD: null command + * Used to indicate previous command has finished. 
+ */ +enum nfp_mbox_cmd { + NFP_MBOX_NO_CMD = 0x00, +}; + +#endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 1ef3623c6e1c..46b76d5a726c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -55,6 +55,7 @@ #include "nfpcore/nfp6000_pcie.h" +#include "nfp_abi.h" #include "nfp_app.h" #include "nfp_main.h" #include "nfp_net.h" @@ -107,6 +108,90 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); } +/* Callers should hold the devlink instance lock */ +int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, + void *out_data, u64 out_length) +{ + unsigned long long addr; + unsigned long err_at; + u64 max_data_sz; + u32 val = 0; + u32 cpp_id; + int n, err; + + if (!pf->mbox) + return -EOPNOTSUPP; + + cpp_id = NFP_CPP_ISLAND_ID(pf->mbox->target, NFP_CPP_ACTION_RW, 0, + pf->mbox->domain); + addr = pf->mbox->addr; + max_data_sz = pf->mbox->size - NFP_MBOX_SYM_MIN_SIZE; + + /* Check if cmd field is clear */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + if (err || val) { + nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n", + cmd, val, err); + return err ?: -EBUSY; + } + + in_length = min(in_length, max_data_sz); + n = nfp_cpp_write(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, + in_data, in_length); + if (n != in_length) + return -EIO; + /* Write data_len and wipe reserved */ + err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, + in_length); + if (err) + return err; + + /* Read back for ordering */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + if (err) + return err; + + /* Write cmd and wipe return value */ + err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, cmd); + if (err) + return err; + + err_at = jiffies + 5 * HZ; + while (true) { + /* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + if (err) + return err; + if (!val) + break; + + if (time_is_before_eq_jiffies(err_at)) + return -ETIMEDOUT; + + msleep(5); + } + + /* Copy output if any (could be error info, do it before reading ret) */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + if (err) + return err; + + out_length = min_t(u32, val, min(out_length, max_data_sz)); + n = nfp_cpp_read(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, + out_data, out_length); + if (n != out_length) + return -EIO; + + /* Check if there is an error */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_RET, &val); + if (err) + return err; + if (val) + return -val; + + return out_length; +} + static bool nfp_board_ready(struct nfp_pf *pf) { const char *cp; @@ -468,6 +553,25 @@ static void nfp_fw_unload(struct nfp_pf *pf) nfp_nsp_close(nsp); } +static int nfp_pf_find_rtsyms(struct nfp_pf *pf) +{ + char pf_symbol[256]; + unsigned int pf_id; + + pf_id = nfp_cppcore_pcie_unit(pf->cpp); + + /* Optional per-PCI PF mailbox */ + snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); + pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol); + if (pf->mbox && pf->mbox->size < NFP_MBOX_SYM_MIN_SIZE) { + nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n", + pf->mbox->size, NFP_MBOX_SYM_MIN_SIZE); + return -EINVAL; + } + + return 0; +} + static int nfp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { @@ -542,6 +646,10 @@ static int 
nfp_pci_probe(struct pci_dev *pdev, pf->mip = nfp_mip_open(pf->cpp); pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip); + err = nfp_pf_find_rtsyms(pf); + if (err) + goto err_fw_unload; + pf->dump_flag = NFP_DUMP_NSP_DIAG; pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 5b028486c894..e3e1fa84ccd7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -60,6 +60,7 @@ struct nfp_mip; struct nfp_net; struct nfp_nsp_identify; struct nfp_port; +struct nfp_rtsym; struct nfp_rtsym_table; /** @@ -87,6 +88,7 @@ struct nfp_dumpspec { * @vf_cfg_mem: Pointer to mapped VF configuration area * @vfcfg_tbl2_area: Pointer to the CPP area for the VF config table * @vfcfg_tbl2: Pointer to mapped VF config table + * @mbox: RTSym of per-PCI PF mailbox (under devlink lock) * @irq_entries: Array of MSI-X entries for all vNICs * @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit) * @num_vfs: Number of SR-IOV VFs enabled @@ -127,6 +129,8 @@ struct nfp_pf { struct nfp_cpp_area *vfcfg_tbl2_area; u8 __iomem *vfcfg_tbl2; + const struct nfp_rtsym *mbox; + struct msix_entry *irq_entries; unsigned int limit_vfs; @@ -182,6 +186,8 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, u8 __iomem * nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area); +int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, + void *out_data, u64 out_length); enum nfp_dump_diag { NFP_DUMP_NSP_DIAG = 0, -- cgit v1.2.3 From a0d163f4327febeae2c98c4b1aaff3552e5b1667 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:45 -0700 Subject: nfp: add shared buffer configuration Allow app FW to advertise its shared buffer pool information. Use the per-PF mailbox to configure them from devlink. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + drivers/net/ethernet/netronome/nfp/nfp_abi.h | 70 ++++++++ drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 22 +++ drivers/net/ethernet/netronome/nfp/nfp_main.h | 15 +- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 7 + .../net/ethernet/netronome/nfp/nfp_shared_buf.c | 180 +++++++++++++++++++++ 6 files changed, 294 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index d5866d708dfa..f032d645911c 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -30,6 +30,7 @@ nfp-objs := \ nfp_net_sriov.o \ nfp_netvf_main.o \ nfp_port.o \ + nfp_shared_buf.o \ nic/main.o ifeq ($(CONFIG_NFP_APP_FLOWER),y) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h index ce3935d6729b..7ffa6e6a9d1c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_abi.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h @@ -51,9 +51,79 @@ * * @NFP_MBOX_NO_CMD: null command * Used to indicate previous command has finished. 
+ * + * @NFP_MBOX_POOL_GET: get shared buffer pool info/config + * Input - struct nfp_shared_buf_pool_id + * Output - struct nfp_shared_buf_pool_info_get + * + * @NFP_MBOX_POOL_SET: set shared buffer pool info/config + * Input - struct nfp_shared_buf_pool_info_set + * Output - None */ enum nfp_mbox_cmd { NFP_MBOX_NO_CMD = 0x00, + + NFP_MBOX_POOL_GET = 0x01, + NFP_MBOX_POOL_SET = 0x02, +}; + +#define NFP_SHARED_BUF_COUNT_SYM_NAME "_abi_nfd_pf%u_sb_cnt" +#define NFP_SHARED_BUF_TABLE_SYM_NAME "_abi_nfd_pf%u_sb_tbl" + +/** + * struct nfp_shared_buf - NFP shared buffer description + * @id: numerical user-visible id of the shared buffer + * @size: size in bytes of the buffer + * @ingress_pools_count: number of ingress pools + * @egress_pools_count: number of egress pools + * @ingress_tc_count: number of ingress trafic classes + * @egress_tc_count: number of egress trafic classes + * @pool_size_unit: pool size may be in credits, each credit is + * @pool_size_unit bytes + */ +struct nfp_shared_buf { + __le32 id; + __le32 size; + __le16 ingress_pools_count; + __le16 egress_pools_count; + __le16 ingress_tc_count; + __le16 egress_tc_count; + + __le32 pool_size_unit; +}; + +/** + * struct nfp_shared_buf_pool_id - shared buffer pool identification + * @shared_buf: shared buffer id + * @pool: pool index + */ +struct nfp_shared_buf_pool_id { + __le32 shared_buf; + __le32 pool; +}; + +/** + * struct nfp_shared_buf_pool_info_get - struct devlink_sb_pool_info mirror + * @pool_type: one of enum devlink_sb_pool_type + * @size: pool size in units of SB's @pool_size_unit + * @threshold_type: one of enum devlink_sb_threshold_type + */ +struct nfp_shared_buf_pool_info_get { + __le32 pool_type; + __le32 size; + __le32 threshold_type; +}; + +/** + * struct nfp_shared_buf_pool_info_set - packed args of sb_pool_set + * @id: pool identification info + * @size: pool size in units of SB's @pool_size_unit + * @threshold_type: one of enum devlink_sb_threshold_type + */ +struct nfp_shared_buf_pool_info_set { + struct nfp_shared_buf_pool_id id; + __le32 size; + __le32 threshold_type; }; #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index b1e67cf4257a..73c7fcc820ac 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -149,6 +149,26 @@ out: return ret; } +static int +nfp_devlink_sb_pool_get(struct devlink *devlink, unsigned int sb_index, + u16 pool_index, struct devlink_sb_pool_info *pool_info) +{ + struct nfp_pf *pf = devlink_priv(devlink); + + return nfp_shared_buf_pool_get(pf, sb_index, pool_index, pool_info); +} + +static int +nfp_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index, + u16 pool_index, + u32 size, enum devlink_sb_threshold_type threshold_type) +{ + struct nfp_pf *pf = devlink_priv(devlink); + + return nfp_shared_buf_pool_set(pf, sb_index, pool_index, + size, threshold_type); +} + static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct nfp_pf *pf = devlink_priv(devlink); @@ -159,6 +179,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) const struct devlink_ops nfp_devlink_ops = { .port_split = nfp_devlink_port_split, .port_unsplit = nfp_devlink_port_unsplit, + .sb_pool_get = nfp_devlink_sb_pool_get, + .sb_pool_set = nfp_devlink_sb_pool_set, .eswitch_mode_get = nfp_devlink_eswitch_mode_get, }; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h 
index e3e1fa84ccd7..595b3dc280e3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -46,10 +46,10 @@ #include #include #include +#include struct dentry; struct device; -struct devlink_ops; struct pci_dev; struct nfp_cpp; @@ -62,6 +62,7 @@ struct nfp_nsp_identify; struct nfp_port; struct nfp_rtsym; struct nfp_rtsym_table; +struct nfp_shared_buf; /** * struct nfp_dumpspec - NFP FW dump specification structure @@ -110,6 +111,8 @@ struct nfp_dumpspec { * @ports: Linked list of port structures (struct nfp_port) * @wq: Workqueue for running works which need to grab @lock * @port_refresh_work: Work entry for taking netdevs out + * @shared_bufs: Array of shared buffer structures if FW has any SBs + * @num_shared_bufs: Number of elements in @shared_bufs * @lock: Protects all fields which may change after probe */ struct nfp_pf { @@ -162,6 +165,9 @@ struct nfp_pf { struct workqueue_struct *wq; struct work_struct port_refresh_work; + struct nfp_shared_buf *shared_bufs; + unsigned int num_shared_bufs; + struct mutex lock; }; @@ -200,4 +206,11 @@ s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec, int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec, struct ethtool_dump *dump_param, void *dest); +int nfp_shared_buf_register(struct nfp_pf *pf); +void nfp_shared_buf_unregister(struct nfp_pf *pf); +int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index, + struct devlink_sb_pool_info *pool_info); +int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type); #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index f8abb4cd9cef..b98422112385 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -728,6 +728,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_app_clean; + err = nfp_shared_buf_register(pf); + if (err) + goto err_devlink_unreg; + mutex_lock(&pf->lock); pf->ddir = nfp_net_debugfs_device_add(pf->pdev); @@ -761,6 +765,8 @@ err_free_vnics: err_clean_ddir: nfp_net_debugfs_dir_clean(&pf->ddir); mutex_unlock(&pf->lock); + nfp_shared_buf_unregister(pf); +err_devlink_unreg: cancel_work_sync(&pf->port_refresh_work); devlink_unregister(devlink); err_app_clean: @@ -788,6 +794,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf) mutex_unlock(&pf->lock); + nfp_shared_buf_unregister(pf); devlink_unregister(priv_to_devlink(pf)); nfp_net_pf_free_irqs(pf); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c new file mode 100644 index 000000000000..0ecd83705368 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. 
Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfp_abi.h" +#include "nfp_app.h" +#include "nfp_main.h" + +static u32 nfp_shared_buf_pool_unit(struct nfp_pf *pf, unsigned int sb) +{ + __le32 sb_id = cpu_to_le32(sb); + unsigned int i; + + for (i = 0; i < pf->num_shared_bufs; i++) + if (pf->shared_bufs[i].id == sb_id) + return le32_to_cpu(pf->shared_bufs[i].pool_size_unit); + + WARN_ON_ONCE(1); + return 0; +} + +int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct nfp_shared_buf_pool_info_get get_data; + struct nfp_shared_buf_pool_id id = { + .shared_buf = cpu_to_le32(sb), + .pool = cpu_to_le32(pool_index), + }; + unsigned int unit_size; + int n; + + unit_size = nfp_shared_buf_pool_unit(pf, sb); + if (!unit_size) + return -EINVAL; + + n = nfp_mbox_cmd(pf, NFP_MBOX_POOL_GET, &id, sizeof(id), + &get_data, sizeof(get_data)); + if (n < 0) + return n; + if (n < sizeof(get_data)) + return -EIO; + + pool_info->pool_type = le32_to_cpu(get_data.pool_type); + pool_info->threshold_type = le32_to_cpu(get_data.threshold_type); + pool_info->size = le32_to_cpu(get_data.size) * unit_size; + + return 0; +} + +int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type) +{ + struct nfp_shared_buf_pool_info_set set_data = { + .id = { + .shared_buf = cpu_to_le32(sb), + .pool = cpu_to_le32(pool_index), + }, + .threshold_type = cpu_to_le32(threshold_type), + }; + unsigned int unit_size; + + unit_size = nfp_shared_buf_pool_unit(pf, sb); + if (!unit_size || size % unit_size) + return -EINVAL; + set_data.size = cpu_to_le32(size / unit_size); + + return nfp_mbox_cmd(pf, NFP_MBOX_POOL_SET, &set_data, sizeof(set_data), + NULL, 0); +} + +int nfp_shared_buf_register(struct nfp_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + unsigned int i, num_entries, entry_sz; + struct nfp_cpp_area *sb_desc_area; + u8 __iomem *sb_desc; + int n, err; + + if (!pf->mbox) + return 0; + + n = nfp_pf_rtsym_read_optional(pf, NFP_SHARED_BUF_COUNT_SYM_NAME, 0); + if (n <= 0) + return n; + num_entries = n; + + sb_desc = nfp_pf_map_rtsym(pf, "sb_tbl", NFP_SHARED_BUF_TABLE_SYM_NAME, + num_entries * sizeof(pf->shared_bufs[0]), + &sb_desc_area); + if (IS_ERR(sb_desc)) + return PTR_ERR(sb_desc); + + entry_sz = nfp_cpp_area_size(sb_desc_area) / num_entries; + + pf->shared_bufs = kmalloc_array(num_entries, sizeof(pf->shared_bufs[0]), + GFP_KERNEL); + if (!pf->shared_bufs) { + err = -ENOMEM; + goto err_release_area; + } + + for (i = 0; i < num_entries; i++) { + struct nfp_shared_buf *sb = 
&pf->shared_bufs[i]; + + /* Entries may be larger in future FW */ + memcpy_fromio(sb, sb_desc + i * entry_sz, sizeof(*sb)); + + err = devlink_sb_register(devlink, + le32_to_cpu(sb->id), + le32_to_cpu(sb->size), + le16_to_cpu(sb->ingress_pools_count), + le16_to_cpu(sb->egress_pools_count), + le16_to_cpu(sb->ingress_tc_count), + le16_to_cpu(sb->egress_tc_count)); + if (err) + goto err_unreg_prev; + } + pf->num_shared_bufs = num_entries; + + nfp_cpp_area_release_free(sb_desc_area); + + return 0; + +err_unreg_prev: + while (i--) + devlink_sb_unregister(devlink, + le32_to_cpu(pf->shared_bufs[i].id)); + kfree(pf->shared_bufs); +err_release_area: + nfp_cpp_area_release_free(sb_desc_area); + return err; +} + +void nfp_shared_buf_unregister(struct nfp_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + unsigned int i; + + for (i = 0; i < pf->num_shared_bufs; i++) + devlink_sb_unregister(devlink, + le32_to_cpu(pf->shared_bufs[i].id)); + kfree(pf->shared_bufs); +} -- cgit v1.2.3 From b586c77b3c657a87e5a754c69b9a40c2301416dc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:46 -0700 Subject: nfp: core: allow 4-byte aligned accesses to Memory Units Current code doesn't enforce length requirements on 32bit accesses with action NFP_CPP_ACTION_RW to memory units, but if the access is only aligned to 4 bytes as well we will fall into the explicit access case and error out. Such accesses are correct, allow them by lowering the width earlier. While at it use a switch statement to improve readability. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 94 ++++++++++------------ 1 file changed, 44 insertions(+), 50 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index a0e336bd1d85..749655c329b2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -933,7 +933,6 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, u32 *wrptr32 = kernel_vaddr; const u32 __iomem *rdptr32; int n, width; - bool is_64; priv = nfp_cpp_area_priv(area); rdptr64 = priv->iomem + offset; @@ -943,10 +942,15 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, return -EFAULT; width = priv->width.read; - if (width <= 0) return -EINVAL; + /* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) && + priv->action == NFP_CPP_ACTION_RW && + (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4)) + width = TARGET_WIDTH_32; + /* Unaligned? 
Translate to an explicit access */ if ((priv->offset + offset) & (width - 1)) return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area), @@ -956,36 +960,29 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, priv->offset + offset, kernel_vaddr, length, width); - is_64 = width == TARGET_WIDTH_64; - - /* MU reads via a PCIe2CPP BAR supports 32bit (and other) lengths */ - if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && - priv->action == NFP_CPP_ACTION_RW) - is_64 = false; + if (WARN_ON(!priv->bar)) + return -EFAULT; - if (is_64) { - if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) - return -EINVAL; - } else { + switch (width) { + case TARGET_WIDTH_32: if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) return -EINVAL; - } - if (WARN_ON(!priv->bar)) - return -EFAULT; + for (n = 0; n < length; n += sizeof(u32)) + *wrptr32++ = __raw_readl(rdptr32++); + return n; +#ifdef __raw_readq + case TARGET_WIDTH_64: + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; - if (is_64) -#ifndef __raw_readq - return -EINVAL; -#else for (n = 0; n < length; n += sizeof(u64)) *wrptr64++ = __raw_readq(rdptr64++); + return n; #endif - else - for (n = 0; n < length; n += sizeof(u32)) - *wrptr32++ = __raw_readl(rdptr32++); - - return n; + default: + return -EINVAL; + } } static int @@ -999,7 +996,6 @@ nfp6000_area_write(struct nfp_cpp_area *area, struct nfp6000_area_priv *priv; u32 __iomem *wrptr32; int n, width; - bool is_64; priv = nfp_cpp_area_priv(area); wrptr64 = priv->iomem + offset; @@ -1009,10 +1005,15 @@ nfp6000_area_write(struct nfp_cpp_area *area, return -EFAULT; width = priv->width.write; - if (width <= 0) return -EINVAL; + /* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && + priv->action == NFP_CPP_ACTION_RW && + (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4)) + width = TARGET_WIDTH_32; + /* Unaligned? 
Translate to an explicit access */ if ((priv->offset + offset) & (width - 1)) return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area), @@ -1022,40 +1023,33 @@ nfp6000_area_write(struct nfp_cpp_area *area, priv->offset + offset, kernel_vaddr, length, width); - is_64 = width == TARGET_WIDTH_64; - - /* MU writes via a PCIe2CPP BAR supports 32bit (and other) lengths */ - if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && - priv->action == NFP_CPP_ACTION_RW) - is_64 = false; + if (WARN_ON(!priv->bar)) + return -EFAULT; - if (is_64) { - if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) - return -EINVAL; - } else { + switch (width) { + case TARGET_WIDTH_32: if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) return -EINVAL; - } - if (WARN_ON(!priv->bar)) - return -EFAULT; + for (n = 0; n < length; n += sizeof(u32)) { + __raw_writel(*rdptr32++, wrptr32++); + wmb(); + } + return n; +#ifdef __raw_writeq + case TARGET_WIDTH_64: + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; - if (is_64) -#ifndef __raw_writeq - return -EINVAL; -#else for (n = 0; n < length; n += sizeof(u64)) { __raw_writeq(*rdptr64++, wrptr64++); wmb(); } + return n; #endif - else - for (n = 0; n < length; n += sizeof(u32)) { - __raw_writel(*rdptr32++, wrptr32++); - wmb(); - } - - return n; + default: + return -EINVAL; + } } struct nfp6000_explicit_priv { -- cgit v1.2.3 From c4c8f39a57bf5057fc51a848d42b7e348ecfa31d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:47 -0700 Subject: nfp: abm: add initial active buffer management NIC skeleton Add a very rudimentary active buffer management NIC support. For now it's like a core NIC without SR-IOV support. Next commits will extend its functionality. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/Kconfig | 13 ++++ drivers/net/ethernet/netronome/nfp/Makefile | 5 ++ drivers/net/ethernet/netronome/nfp/abm/main.c | 85 +++++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.h | 47 +++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_app.c | 3 + drivers/net/ethernet/netronome/nfp/nfp_app.h | 2 + 6 files changed, 155 insertions(+) create mode 100644 drivers/net/ethernet/netronome/nfp/abm/main.c create mode 100644 drivers/net/ethernet/netronome/nfp/abm/main.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index ae0c46ba7546..66f15b05b65e 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -36,6 +36,19 @@ config NFP_APP_FLOWER either directly, with Open vSwitch, or any other way. Note that TC Flower offload requires specific FW to work. +config NFP_APP_ABM_NIC + bool "NFP4000/NFP6000 Advanced buffer management NIC support" + depends on NFP + depends on NET_SWITCHDEV + default y + help + Enable driver support for Advanced buffer management NIC on NFP. + ABM NIC allows advanced configuration of queuing and scheduling + of packets, including ECN marking. Say Y, if you are planning to + use one of the NFP4000 and NFP6000 platforms which support this + functionality. + Code will be built into the nfp.ko driver. 
+ config NFP_DEBUG bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers" depends on NFP diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index f032d645911c..3d79b709d77a 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -53,4 +53,9 @@ nfp-objs += \ bpf/jit.o endif +ifeq ($(CONFIG_NFP_APP_ABM_NIC),y) +nfp-objs += \ + abm/main.o +endif + nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c new file mode 100644 index 000000000000..7a0a807bea4a --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "../nfpcore/nfp_cpp.h" +#include "../nfpcore/nfp_nsp.h" +#include "../nfp_app.h" +#include "../nfp_main.h" +#include "main.h" + +static int nfp_abm_init(struct nfp_app *app) +{ + struct nfp_pf *pf = app->pf; + struct nfp_abm *abm; + + if (!pf->eth_tbl) { + nfp_err(pf->cpp, "ABM NIC requires ETH table\n"); + return -EINVAL; + } + if (pf->max_data_vnics != pf->eth_tbl->count) { + nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", + pf->max_data_vnics, pf->eth_tbl->count); + return -EINVAL; + } + if (!pf->mac_stats_bar) { + nfp_warn(app->cpp, "ABM NIC requires mac_stats symbol\n"); + return -EINVAL; + } + + abm = kzalloc(sizeof(*abm), GFP_KERNEL); + if (!abm) + return -ENOMEM; + app->priv = abm; + abm->app = app; + + return 0; +} + +static void nfp_abm_clean(struct nfp_app *app) +{ + struct nfp_abm *abm = app->priv; + + kfree(abm); + app->priv = NULL; +} + +const struct nfp_app_type app_abm = { + .id = NFP_APP_ACTIVE_BUFFER_MGMT_NIC, + .name = "abm", + + .init = nfp_abm_init, + .clean = nfp_abm_clean, + + .vnic_alloc = nfp_app_nic_vnic_alloc, +}; diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h new file mode 100644 index 000000000000..e54b6f64ee10 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __NFP_ABM_H__ +#define __NFP_ABM_H__ 1 + +struct nfp_app; + +/** + * struct nfp_abm - ABM NIC app structure + * @app: back pointer to nfp_app + */ +struct nfp_abm { + struct nfp_app *app; +}; +#endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 0e0253c7e17b..c9d8a7ab311e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -54,6 +54,9 @@ static const struct nfp_app_type *apps[] = { #ifdef CONFIG_NFP_APP_FLOWER [NFP_APP_FLOWER_NIC] = &app_flower, #endif +#ifdef CONFIG_NFP_APP_ABM_NIC + [NFP_APP_ACTIVE_BUFFER_MGMT_NIC] = &app_abm, +#endif }; struct nfp_app *nfp_app_from_netdev(struct net_device *netdev) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 2d9cb2528fc7..654cabd95ee4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -57,11 +57,13 @@ enum nfp_app_id { NFP_APP_CORE_NIC = 0x1, NFP_APP_BPF_NIC = 0x2, NFP_APP_FLOWER_NIC = 0x3, + NFP_APP_ACTIVE_BUFFER_MGMT_NIC = 0x4, }; extern const struct nfp_app_type app_nic; extern const struct nfp_app_type app_bpf; extern const struct nfp_app_type app_flower; +extern const struct nfp_app_type app_abm; /** * struct nfp_app_type - application definition -- cgit v1.2.3 From cc54dc2804f2b038af299aee89a9ff47454248de Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:48 -0700 Subject: nfp: abm: create project-specific vNIC structure ABM NIC requires more complex vNIC handling, allocate per-vNIC structure. Find out RX queue base and PCI PF id. There will be multiple PFs sharing the same MAC port, therefore the MAC address assigned to the vNIC must be looked up in the HWInfo database. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 58 +++++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.c | 104 ++++++++++++++++++++++- drivers/net/ethernet/netronome/nfp/abm/main.h | 20 +++++ drivers/net/ethernet/netronome/nfp/nfp_app.h | 2 + drivers/net/ethernet/netronome/nfp/nfp_app_nic.c | 5 +- 6 files changed, 186 insertions(+), 4 deletions(-) create mode 100644 drivers/net/ethernet/netronome/nfp/abm/ctrl.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 3d79b709d77a..6373f56205fd 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -55,6 +55,7 @@ endif ifeq ($(CONFIG_NFP_APP_ABM_NIC),y) nfp-objs += \ + abm/ctrl.o \ abm/main.o endif diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c new file mode 100644 index 000000000000..e40f6f06417b --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. 
+ * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include + +#include "../nfpcore/nfp_cpp.h" +#include "../nfp_app.h" +#include "../nfp_main.h" +#include "../nfp_net.h" +#include "main.h" + +void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink) +{ + alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ); + alink->queue_base /= alink->vnic->stride_rx; +} + +int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm) +{ + struct nfp_pf *pf = abm->app->pf; + unsigned int pf_id; + + pf_id = nfp_cppcore_pcie_unit(pf->cpp); + abm->pf_id = pf_id; + + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 7a0a807bea4a..1985e58ab0db 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -32,16 +32,108 @@ * SOFTWARE. 
*/ +#include + +#include "../nfpcore/nfp.h" #include "../nfpcore/nfp_cpp.h" #include "../nfpcore/nfp_nsp.h" #include "../nfp_app.h" #include "../nfp_main.h" +#include "../nfp_net.h" +#include "../nfp_port.h" #include "main.h" +static void +nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, + unsigned int id) +{ + struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[id]; + u8 mac_addr[ETH_ALEN]; + const char *mac_str; + char name[32]; + + if (id > pf->eth_tbl->count) { + nfp_warn(pf->cpp, "No entry for persistent MAC address\n"); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + snprintf(name, sizeof(name), "eth%u.mac.pf%u", + eth_port->eth_index, abm->pf_id); + + mac_str = nfp_hwinfo_lookup(pf->hwinfo, name); + if (!mac_str) { + nfp_warn(pf->cpp, "Can't lookup persistent MAC address (%s)\n", + name); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", + &mac_addr[0], &mac_addr[1], &mac_addr[2], + &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { + nfp_warn(pf->cpp, "Can't parse persistent MAC address (%s)\n", + mac_str); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr); + ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); +} + +static int +nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) +{ + struct nfp_abm *abm = app->priv; + struct nfp_abm_link *alink; + int err; + + alink = kzalloc(sizeof(*alink), GFP_KERNEL); + if (!alink) + return -ENOMEM; + nn->app_priv = alink; + alink->abm = abm; + alink->vnic = nn; + alink->id = id; + + nn->port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, nn->dp.netdev); + if (IS_ERR(nn->port)) { + err = PTR_ERR(nn->port); + goto err_free_alink; + } + + err = nfp_app_nic_vnic_init_phy_port(app->pf, app, nn, id); + if (err < 0) + goto err_free_port; + if (nn->port->type == NFP_PORT_INVALID) + /* core will kill this vNIC */ + return 0; + + nfp_abm_vnic_set_mac(app->pf, abm, nn, id); + nfp_abm_ctrl_read_params(alink); + + return 0; + +err_free_port: + nfp_port_free(nn->port); +err_free_alink: + kfree(alink); + return err; +} + +static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn) +{ + struct nfp_abm_link *alink = nn->app_priv; + + kfree(alink); +} + static int nfp_abm_init(struct nfp_app *app) { struct nfp_pf *pf = app->pf; struct nfp_abm *abm; + int err; if (!pf->eth_tbl) { nfp_err(pf->cpp, "ABM NIC requires ETH table\n"); @@ -63,7 +155,16 @@ static int nfp_abm_init(struct nfp_app *app) app->priv = abm; abm->app = app; + err = nfp_abm_ctrl_find_addrs(abm); + if (err) + goto err_free_abm; + return 0; + +err_free_abm: + kfree(abm); + app->priv = NULL; + return err; } static void nfp_abm_clean(struct nfp_app *app) @@ -81,5 +182,6 @@ const struct nfp_app_type app_abm = { .init = nfp_abm_init, .clean = nfp_abm_clean, - .vnic_alloc = nfp_app_nic_vnic_alloc, + .vnic_alloc = nfp_abm_vnic_alloc, + .vnic_free = nfp_abm_vnic_free, }; diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index e54b6f64ee10..3c5a01c96ecd 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -36,12 +36,32 @@ #define __NFP_ABM_H__ 1 struct nfp_app; +struct nfp_net; /** * struct nfp_abm - ABM NIC app structure * @app: back pointer to nfp_app + * @pf_id: ID of our PF link */ struct nfp_abm { struct nfp_app *app; + unsigned int pf_id; }; + +/** + * struct nfp_abm_link - port tuple 
of a ABM NIC + * @abm: back pointer to nfp_abm + * @vnic: data vNIC + * @id: id of the data vNIC + * @queue_base: id of base to host queue within PCIe (not QC idx) + */ +struct nfp_abm_link { + struct nfp_abm *abm; + struct nfp_net *vnic; + unsigned int id; + unsigned int queue_base; +}; + +void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); +int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm); #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 654cabd95ee4..fdf2593ae151 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -412,5 +412,7 @@ void nfp_app_free(struct nfp_app *app); int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id); +int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, + struct nfp_net *nn, unsigned int id); #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c index b9618c37403f..e2dfe4f168bb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c @@ -38,9 +38,8 @@ #include "nfp_net.h" #include "nfp_port.h" -static int -nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, - struct nfp_net *nn, unsigned int id) +int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, + struct nfp_net *nn, unsigned int id) { int err; -- cgit v1.2.3 From 634c6b7a85d46a891b2edee075bb422043dcb8da Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:49 -0700 Subject: nfp: add app pointer to port representors nfp_apps can currently associate their structures with vNICs but not representors. Add app priv pointer to representors as well. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index cd756a15445f..8dca283c05c3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -76,6 +76,7 @@ struct nfp_repr_pcpu_stats { * @port: Port of representor * @app: APP handle * @stats: Statistic of packets hitting CPU + * @app_priv: Pointer for APP data */ struct nfp_repr { struct net_device *netdev; @@ -83,6 +84,7 @@ struct nfp_repr { struct nfp_port *port; struct nfp_app *app; struct nfp_repr_pcpu_stats __percpu *stats; + void *app_priv; }; /** -- cgit v1.2.3 From 4afa3af4181edda539cce7889a6efcde0e6a0a5c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:51 -0700 Subject: nfp: add devlink_eswitch_mode_set callback Our previous apps all assumed to use only one eswitch mode (legacy or switchdev) without the ability to change it. ABM NIC will want to support the switch so plumb devlink_eswitch_mode_set through. The devlink_eswitch_mode_set is expected to spawn representors and potentially devlink ports so it's called under big devlink lock and pf->lock. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_app.h | 9 +++++++++ drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 13 +++++++++++++ 2 files changed, 22 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index fdf2593ae151..23b99a4e05c2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -97,6 +97,7 @@ extern const struct nfp_app_type app_abm; * @bpf: BPF ndo offload-related calls * @xdp_offload: offload an XDP program * @eswitch_mode_get: get SR-IOV eswitch mode + * @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock) * @sriov_enable: app-specific sriov initialisation * @sriov_disable: app-specific sriov clean-up * @repr_get: get representor netdev @@ -148,6 +149,7 @@ struct nfp_app_type { void (*sriov_disable)(struct nfp_app *app); enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app); + int (*eswitch_mode_set)(struct nfp_app *app, u16 mode); struct net_device *(*repr_get)(struct nfp_app *app, u32 id); }; @@ -372,6 +374,13 @@ static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode) return 0; } +static inline int nfp_app_eswitch_mode_set(struct nfp_app *app, u16 mode) +{ + if (!app->type->eswitch_mode_set) + return -EOPNOTSUPP; + return app->type->eswitch_mode_set(app, mode); +} + static inline int nfp_app_sriov_enable(struct nfp_app *app, int num_vfs) { if (!app || !app->type->sriov_enable) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 73c7fcc820ac..71c2edd83031 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -176,12 +176,25 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return nfp_app_eswitch_mode_get(pf->app, mode); } +static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct nfp_pf *pf = devlink_priv(devlink); + int ret; + + mutex_lock(&pf->lock); + ret = nfp_app_eswitch_mode_set(pf->app, mode); + mutex_unlock(&pf->lock); + + return ret; +} + const struct devlink_ops nfp_devlink_ops = { .port_split = nfp_devlink_port_split, .port_unsplit = nfp_devlink_port_unsplit, .sb_pool_get = nfp_devlink_sb_pool_get, .sb_pool_set = nfp_devlink_sb_pool_set, .eswitch_mode_get = nfp_devlink_eswitch_mode_get, + .eswitch_mode_set = nfp_devlink_eswitch_mode_set, }; int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) -- cgit v1.2.3 From d05d902eda23274d8123bc509d896226845eaf94 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:52 -0700 Subject: nfp: abm: spawn port netdevs To configure buffering points we need full set of netdevs: ASIC user netdev -- | -- PCIe port MAC port -- | -- Configuring egrees qdiscs on user netdev configures standard Linux TC software qdiscs, configuring PCIe port qdiscs will provide a way of setting ASIC queuing parameters for PCIe block. MAC port netdev egress qdiscs correspond to ASIC MAC Traffic Manager block. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/abm/main.c | 234 ++++++++++++++++++++-- drivers/net/ethernet/netronome/nfp/abm/main.h | 8 + drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 1 + 4 files changed, 225 insertions(+), 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 1985e58ab0db..d8f1bdac02b8 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -32,7 +32,12 @@ * SOFTWARE. */ +#include #include +#include +#include +#include +#include #include "../nfpcore/nfp.h" #include "../nfpcore/nfp_cpp.h" @@ -40,9 +45,195 @@ #include "../nfp_app.h" #include "../nfp_main.h" #include "../nfp_net.h" +#include "../nfp_net_repr.h" #include "../nfp_port.h" #include "main.h" +static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id) +{ + return FIELD_PREP(NFP_ABM_PORTID_TYPE, rtype) | + FIELD_PREP(NFP_ABM_PORTID_ID, id); +} + +static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id) +{ + enum nfp_repr_type rtype; + struct nfp_reprs *reprs; + u8 port; + + rtype = FIELD_GET(NFP_ABM_PORTID_TYPE, port_id); + port = FIELD_GET(NFP_ABM_PORTID_ID, port_id); + + reprs = rcu_dereference(app->reprs[rtype]); + if (!reprs) + return NULL; + + if (port >= reprs->num_reprs) + return NULL; + + return rcu_dereference(reprs->reprs[port]); +} + +static int +nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink, + enum nfp_port_type ptype) +{ + struct net_device *netdev; + enum nfp_repr_type rtype; + struct nfp_reprs *reprs; + struct nfp_repr *repr; + struct nfp_port *port; + int err; + + if (ptype == NFP_PORT_PHYS_PORT) + rtype = NFP_REPR_TYPE_PHYS_PORT; + else + rtype = NFP_REPR_TYPE_PF; + + netdev = nfp_repr_alloc(app); + if (!netdev) + return -ENOMEM; + repr = netdev_priv(netdev); + repr->app_priv = alink; + + port = nfp_port_alloc(app, ptype, netdev); + if (IS_ERR(port)) { + err = PTR_ERR(port); + goto err_free_repr; + } + + if (ptype == NFP_PORT_PHYS_PORT) { + err = nfp_port_init_phy_port(app->pf, app, port, alink->id); + if (err) + goto err_free_port; + } else { + port->pf_id = alink->abm->pf_id; + port->vnic = alink->vnic->dp.ctrl_bar; + } + + SET_NETDEV_DEV(netdev, &alink->vnic->pdev->dev); + eth_hw_addr_random(netdev); + + err = nfp_repr_init(app, netdev, nfp_abm_portid(rtype, alink->id), + port, alink->vnic->dp.netdev); + if (err) + goto err_free_port; + + reprs = nfp_reprs_get_locked(app, rtype); + WARN(nfp_repr_get_locked(app, reprs, alink->id), "duplicate repr"); + rcu_assign_pointer(reprs->reprs[alink->id], netdev); + + nfp_info(app->cpp, "%s Port %d Representor(%s) created\n", + ptype == NFP_PORT_PF_PORT ? 
"PCIe" : "Phys", + alink->id, netdev->name); + + return 0; + +err_free_port: + nfp_port_free(port); +err_free_repr: + nfp_repr_free(netdev); + return err; +} + +static void +nfp_abm_kill_repr(struct nfp_app *app, struct nfp_abm_link *alink, + enum nfp_repr_type rtype) +{ + struct net_device *netdev; + struct nfp_reprs *reprs; + + reprs = nfp_reprs_get_locked(app, rtype); + netdev = nfp_repr_get_locked(app, reprs, alink->id); + if (!netdev) + return; + rcu_assign_pointer(reprs->reprs[alink->id], NULL); + synchronize_rcu(); + /* Cast to make sure nfp_repr_clean_and_free() takes a nfp_repr */ + nfp_repr_clean_and_free((struct nfp_repr *)netdev_priv(netdev)); +} + +static void +nfp_abm_kill_reprs(struct nfp_abm *abm, struct nfp_abm_link *alink) +{ + nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PF); + nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PHYS_PORT); +} + +static void nfp_abm_kill_reprs_all(struct nfp_abm *abm) +{ + struct nfp_pf *pf = abm->app->pf; + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->vnics, vnic_list) + nfp_abm_kill_reprs(abm, (struct nfp_abm_link *)nn->app_priv); +} + +static enum devlink_eswitch_mode nfp_abm_eswitch_mode_get(struct nfp_app *app) +{ + struct nfp_abm *abm = app->priv; + + return abm->eswitch_mode; +} + +static int nfp_abm_eswitch_set_legacy(struct nfp_abm *abm) +{ + nfp_abm_kill_reprs_all(abm); + + abm->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + return 0; +} + +static void nfp_abm_eswitch_clean_up(struct nfp_abm *abm) +{ + if (abm->eswitch_mode != DEVLINK_ESWITCH_MODE_LEGACY) + WARN_ON(nfp_abm_eswitch_set_legacy(abm)); +} + +static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm) +{ + struct nfp_app *app = abm->app; + struct nfp_pf *pf = app->pf; + struct nfp_net *nn; + int err; + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + struct nfp_abm_link *alink = nn->app_priv; + + err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PHYS_PORT); + if (err) + goto err_kill_all_reprs; + + err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PF_PORT); + if (err) + goto err_kill_all_reprs; + } + + abm->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + return 0; + +err_kill_all_reprs: + nfp_abm_kill_reprs_all(abm); + return err; +} + +static int nfp_abm_eswitch_mode_set(struct nfp_app *app, u16 mode) +{ + struct nfp_abm *abm = app->priv; + + if (abm->eswitch_mode == mode) + return 0; + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + return nfp_abm_eswitch_set_legacy(abm); + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + return nfp_abm_eswitch_set_switchdev(abm); + default: + return -EINVAL; + } +} + static void nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, unsigned int id) @@ -87,7 +278,6 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { struct nfp_abm *abm = app->priv; struct nfp_abm_link *alink; - int err; alink = kzalloc(sizeof(*alink), GFP_KERNEL); if (!alink) @@ -97,41 +287,26 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) alink->vnic = nn; alink->id = id; - nn->port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, nn->dp.netdev); - if (IS_ERR(nn->port)) { - err = PTR_ERR(nn->port); - goto err_free_alink; - } - - err = nfp_app_nic_vnic_init_phy_port(app->pf, app, nn, id); - if (err < 0) - goto err_free_port; - if (nn->port->type == NFP_PORT_INVALID) - /* core will kill this vNIC */ - return 0; + netif_keep_dst(nn->dp.netdev); nfp_abm_vnic_set_mac(app->pf, abm, nn, id); nfp_abm_ctrl_read_params(alink); return 0; - -err_free_port: - nfp_port_free(nn->port); 
-err_free_alink: - kfree(alink); - return err; } static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn) { struct nfp_abm_link *alink = nn->app_priv; + nfp_abm_kill_reprs(alink->abm, alink); kfree(alink); } static int nfp_abm_init(struct nfp_app *app) { struct nfp_pf *pf = app->pf; + struct nfp_reprs *reprs; struct nfp_abm *abm; int err; @@ -159,8 +334,21 @@ static int nfp_abm_init(struct nfp_app *app) if (err) goto err_free_abm; + err = -ENOMEM; + reprs = nfp_reprs_alloc(pf->max_data_vnics); + if (!reprs) + goto err_free_abm; + RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs); + + reprs = nfp_reprs_alloc(pf->max_data_vnics); + if (!reprs) + goto err_free_phys; + RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PF], reprs); + return 0; +err_free_phys: + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); err_free_abm: kfree(abm); app->priv = NULL; @@ -171,6 +359,9 @@ static void nfp_abm_clean(struct nfp_app *app) { struct nfp_abm *abm = app->priv; + nfp_abm_eswitch_clean_up(abm); + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); kfree(abm); app->priv = NULL; } @@ -184,4 +375,9 @@ const struct nfp_app_type app_abm = { .vnic_alloc = nfp_abm_vnic_alloc, .vnic_free = nfp_abm_vnic_free, + + .eswitch_mode_get = nfp_abm_eswitch_mode_get, + .eswitch_mode_set = nfp_abm_eswitch_mode_set, + + .repr_get = nfp_abm_repr_get, }; diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 3c5a01c96ecd..5938b69b8a84 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -35,17 +35,25 @@ #ifndef __NFP_ABM_H__ #define __NFP_ABM_H__ 1 +#include + struct nfp_app; struct nfp_net; +#define NFP_ABM_PORTID_TYPE GENMASK(23, 16) +#define NFP_ABM_PORTID_ID GENMASK(7, 0) + /** * struct nfp_abm - ABM NIC app structure * @app: back pointer to nfp_app * @pf_id: ID of our PF link + * @eswitch_mode: devlink eswitch mode, advanced functions only visible + * in switchdev mode */ struct nfp_abm { struct nfp_app *app; unsigned int pf_id; + enum devlink_eswitch_mode eswitch_mode; }; /** diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 6e79da91e475..09e87d5f4f72 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -385,7 +385,7 @@ err_free_netdev: return NULL; } -static void nfp_repr_clean_and_free(struct nfp_repr *repr) +void nfp_repr_clean_and_free(struct nfp_repr *repr) { nfp_info(repr->app->cpp, "Destroying Representor(%s)\n", repr->netdev->name); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 8dca283c05c3..8366e4f3c623 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -127,6 +127,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, struct net_device *pf_netdev); void nfp_repr_free(struct net_device *netdev); struct net_device *nfp_repr_alloc(struct nfp_app *app); +void nfp_repr_clean_and_free(struct nfp_repr *repr); void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs); void nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type); -- cgit v1.2.3 From 1f70036723e10b165627bb11a7809eed80d43a16 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 
22:12:53 -0700 Subject: nfp: abm: force Ethernet port up Current control firmware does not cater too well to multi-host applications. There is no way to check which hosts are up or otherwise negotiate what the state of the external port (the Ethernet port) should be. Make sure the link is up when the driver loads, and don't take it down when the Ethernet port netdev is closed. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/abm/main.c | 14 ++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_port.c | 2 ++ drivers/net/ethernet/netronome/nfp/nfp_port.h | 2 ++ 3 files changed, 18 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index d8f1bdac02b8..7afd24ce79a5 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -103,6 +103,7 @@ nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink, } if (ptype == NFP_PORT_PHYS_PORT) { + port->eth_forced = true; err = nfp_port_init_phy_port(app->pf, app, port, alink->id); if (err) goto err_free_port; @@ -276,8 +277,10 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, static int nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { + struct nfp_eth_table_port *eth_port = &app->pf->eth_tbl->ports[id]; struct nfp_abm *abm = app->priv; struct nfp_abm_link *alink; + int err; alink = kzalloc(sizeof(*alink), GFP_KERNEL); if (!alink) @@ -287,12 +290,23 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) alink->vnic = nn; alink->id = id; + /* This is a multi-host app, make sure MAC/PHY is up, but don't + * make the MAC/PHY state follow the state of any of the ports. + */ + err = nfp_eth_set_configured(app->cpp, eth_port->index, true); + if (err < 0) + goto err_free_alink; + netif_keep_dst(nn->dp.netdev); nfp_abm_vnic_set_mac(app->pf, abm, nn, id); nfp_abm_ctrl_read_params(alink); return 0; + +err_free_alink: + kfree(alink); + return err; } static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 7bd8be5c833b..a17f1ace6988 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -218,6 +218,8 @@ int nfp_port_configure(struct net_device *netdev, bool configed) eth_port = __nfp_port_get_eth_port(port); if (!eth_port) return 0; + if (port->eth_forced) + return 0; err = nfp_eth_set_configured(port->app->cpp, eth_port->index, configed); return err < 0 && err != -EOPNOTSUPP ?
err : 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index fa7e669a969c..1d9b2b47e27d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -77,6 +77,7 @@ enum nfp_port_flags { * @app: backpointer to the app structure * @dl_port: devlink port structure * @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme + * @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change * @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry * @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available * @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3) @@ -99,6 +100,7 @@ struct nfp_port { /* NFP_PORT_PHYS_PORT */ struct { unsigned int eth_id; + bool eth_forced; struct nfp_eth_table_port *eth_port; u8 __iomem *eth_stats; }; -- cgit v1.2.3 From 290f54db317e99edf9fda8ac1d4d27fcd5c7743f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:54 -0700 Subject: nfp: use split in naming of PCIe PF ports PCI PFs can host more than one logical endpoint. In NFP terms this means having more than one vNIC for PCIe PF. The vNICs are usually corresponding 1:1 to Ethernet ports. In core NIC we use the legacy idea of vNIC *being* the Ethernet port, hence netdevs put pX(sY) in their phys_port_name, like Ethernet ports would. When ASIC ports are fully represented we need to be able to name different PCIe PF ports, too. Use a scheme similar to Ethernet ports - pfXsY, for PCIe PF number X, sub-port Y. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/abm/main.c | 2 ++ drivers/net/ethernet/netronome/nfp/nfp_port.c | 6 +++++- drivers/net/ethernet/netronome/nfp/nfp_port.h | 4 ++++ 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 7afd24ce79a5..5a12bb20bced 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -109,6 +109,8 @@ nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink, goto err_free_port; } else { port->pf_id = alink->abm->pf_id; + port->pf_split = app->pf->max_data_vnics > 1; + port->pf_split_id = alink->id; port->vnic = alink->vnic->dp.ctrl_bar; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index a17f1ace6988..9c1298114c70 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -181,7 +181,11 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len) eth_port->label_subport); break; case NFP_PORT_PF_PORT: - n = snprintf(name, len, "pf%d", port->pf_id); + if (!port->pf_split) + n = snprintf(name, len, "pf%d", port->pf_id); + else + n = snprintf(name, len, "pf%ds%d", port->pf_id, + port->pf_split_id); break; case NFP_PORT_VF_PORT: n = snprintf(name, len, "pf%dvf%d", port->pf_id, port->vf_id); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 1d9b2b47e27d..18666750456e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -82,6 +82,8 @@ enum nfp_port_flags { * @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available * @pf_id: for %NFP_PORT_PF_PORT, 
%NFP_PORT_VF_PORT ID of the PCI PF (0-3) * @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id + * @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC + * @pf_split_id:for %NFP_PORT_PF_PORT ID of PCI PF vNIC (valid if @pf_split) * @vnic: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT vNIC ctrl memory * @port_list: entry on pf's list of ports */ @@ -108,6 +110,8 @@ struct nfp_port { struct { unsigned int pf_id; unsigned int vf_id; + bool pf_split; + unsigned int pf_split_id; u8 __iomem *vnic; }; }; -- cgit v1.2.3 From 51c1df83e35ce0e24ca10037c73245cb4c8ac11a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 May 2018 22:12:55 -0700 Subject: nfp: assign vNIC id as phys_port_name of vNICs which are not ports When NFP is modelled as a switch we assign phys_port_name to respective port(representor )s: vNIC0 - | - PF port (pf%d) MAC/PHY (p%d[s%d]) - |E== In most cases there is only one vNIC for communication with the switch. If there is more than one we need to be able to identify them. Use %d as phys_port_name of the vNICs. We don't have to pass ID to nfp_net_debugfs_vnic_add() separately any more. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 7 +++++-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 20 +++++++++++++++++++- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 4 ++-- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 4 +++- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 2 +- 5 files changed, 30 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index bd7d8ae31e17..57cb035dcc6d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -545,6 +545,7 @@ struct nfp_net_dp { /** * struct nfp_net - NFP network device structure * @dp: Datapath structure + * @id: vNIC id within the PF (0 for VFs) * @fw_ver: Firmware version * @cap: Capabilities advertised by the Firmware * @max_mtu: Maximum support MTU advertised by the Firmware @@ -597,6 +598,8 @@ struct nfp_net { struct nfp_net_fw_version fw_ver; + u32 id; + u32 cap; u32 max_mtu; @@ -909,7 +912,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new, void nfp_net_debugfs_create(void); void nfp_net_debugfs_destroy(void); struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev); -void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id); +void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir); void nfp_net_debugfs_dir_clean(struct dentry **dir); #else static inline void nfp_net_debugfs_create(void) @@ -926,7 +929,7 @@ static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev) } static inline void -nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id) +nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) { } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d9111c077699..eea11e881bf5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3277,6 +3277,24 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static int +nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) +{ + struct nfp_net *nn = netdev_priv(netdev); 
+ int n; + + if (nn->port) + return nfp_port_get_phys_port_name(netdev, name, len); + + if (!nn->dp.is_vf) { + n = snprintf(name, len, "%d", nn->id); + if (n >= len) + return -EINVAL; + } + + return 0; +} + /** * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW * @nn: NFP Net device to reconfigure @@ -3475,7 +3493,7 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_set_mac_address = nfp_net_set_mac_address, .ndo_set_features = nfp_net_set_features, .ndo_features_check = nfp_net_features_check, - .ndo_get_phys_port_name = nfp_port_get_phys_port_name, + .ndo_get_phys_port_name = nfp_net_get_phys_port_name, .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, .ndo_bpf = nfp_net_xdp, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 67cdd8330c59..099b63d67451 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -201,7 +201,7 @@ static const struct file_operations nfp_xdp_q_fops = { .llseek = seq_lseek }; -void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id) +void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) { struct dentry *queues, *tx, *rx, *xdp; char name[20]; @@ -211,7 +211,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id) return; if (nfp_net_is_data_vnic(nn)) - sprintf(name, "vnic%d", id); + sprintf(name, "vnic%d", nn->id); else strcpy(name, "ctrl-vnic"); nn->debugfs_dir = debugfs_create_dir(name, ddir); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index b98422112385..28516eecccc8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -178,11 +178,13 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) { int err; + nn->id = id; + err = nfp_net_init(nn); if (err) return err; - nfp_net_debugfs_vnic_add(nn, pf->ddir, id); + nfp_net_debugfs_vnic_add(nn, pf->ddir); if (nn->port) { err = nfp_devlink_port_register(pf->app, nn->port); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index b802a1d55449..68928c86b698 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -283,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, nfp_net_info(nn); vf->ddir = nfp_net_debugfs_device_add(pdev); - nfp_net_debugfs_vnic_add(nn, vf->ddir, 0); + nfp_net_debugfs_vnic_add(nn, vf->ddir); return 0; -- cgit v1.2.3 From a1f5d1f0dfccea54e1d9c2da16fd8c9b54f81a75 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 22 May 2018 12:17:59 +0200 Subject: net: phy: sfp: warn the user when no tx_disable pin is available In case no Tx disable pin is available, the SFP modules will always be emitting. This could be an issue with modules that use a laser as their light source, as we would have no way to disable it when the fiber is removed. This patch adds a warning when registering an SFP cage that does not have its tx_disable pin wired or available. Signed-off-by: Antoine Tenart Acked-by: Russell King Signed-off-by: David S.
Miller --- drivers/net/phy/sfp.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 4ab6e9a50bbe..a91d12209a81 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1065,6 +1065,15 @@ static int sfp_probe(struct platform_device *pdev) if (poll) mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); + /* We could have an issue in cases no Tx disable pin is available or + * wired as modules using a laser as their light source will continue to + * be active when the fiber is removed. This could be a safety issue and + * we should at least warn the user about that. + */ + if (!sfp->gpio[GPIO_TX_DISABLE]) + dev_warn(sfp->dev, + "No tx_disable pin: SFP modules will always be emitting.\n"); + return 0; } -- cgit v1.2.3 From 66ede1f9c9466cdf866b4be40a814a140d5df60a Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 22 May 2018 12:18:00 +0200 Subject: net: phy: sfp: make the i2c-bus dt property mandatory This patch makes the i2c-bus property mandatory when using a device tree. If the sfp i2c bus isn't described it's impossible to guess the protocol to use for a given module, and the sfp module would then not work in most cases. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/phy/sfp.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index a91d12209a81..c4c92db86dfa 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -976,6 +976,7 @@ static int sfp_probe(struct platform_device *pdev) if (pdev->dev.of_node) { struct device_node *node = pdev->dev.of_node; const struct of_device_id *id; + struct i2c_adapter *i2c; struct device_node *np; id = of_match_node(sfp_of_match, node); @@ -985,19 +986,20 @@ static int sfp_probe(struct platform_device *pdev) sff = sfp->type = id->data; np = of_parse_phandle(node, "i2c-bus", 0); - if (np) { - struct i2c_adapter *i2c; - - i2c = of_find_i2c_adapter_by_node(np); - of_node_put(np); - if (!i2c) - return -EPROBE_DEFER; - - err = sfp_i2c_configure(sfp, i2c); - if (err < 0) { - i2c_put_adapter(i2c); - return err; - } + if (!np) { + dev_err(sfp->dev, "missing 'i2c-bus' property\n"); + return -ENODEV; + } + + i2c = of_find_i2c_adapter_by_node(np); + of_node_put(np); + if (!i2c) + return -EPROBE_DEFER; + + err = sfp_i2c_configure(sfp, i2c); + if (err < 0) { + i2c_put_adapter(i2c); + return err; } } -- cgit v1.2.3 From 8eea1ca82be90a7e7a4624ab9cb323574a5f71df Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Tue, 22 May 2018 11:34:40 -0400 Subject: gso: limit udp gso to egress-only virtual devices Until the udp receive stack supports large packets (UDP GRO), GSO packets must not loop from the egress to the ingress path. Revert the change that added NETIF_F_GSO_UDP_L4 to various virtual devices through NETIF_F_GSO_ENCAP_ALL as this included devices that may loop packets, such as veth and macvlan. Instead add it to specific devices that forward to another device's egress path, bonding and team. Fixes: 83aa025f535f ("udp: add gso support to virtual devices") CC: Alexander Duyck Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 5 +++-- drivers/net/team/team.c | 5 +++-- include/linux/netdev_features.h | 1 - 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 06efdf6a762b..fea17b92b1ae 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1107,7 +1107,8 @@ static void bond_compute_features(struct bonding *bond) done: bond_dev->vlan_features = vlan_features; - bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; + bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_GSO_UDP_L4; bond_dev->gso_max_segs = gso_max_segs; netif_set_gso_max_size(bond_dev, gso_max_size); @@ -4268,7 +4269,7 @@ void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; + bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; bond_dev->features |= bond_dev->hw_features; } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 9dbd390ace34..d6ff881165d0 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1026,7 +1026,8 @@ static void __team_compute_features(struct team *team) } team->dev->vlan_features = vlan_features; - team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; + team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_GSO_UDP_L4; team->dev->hard_header_len = max_hard_header_len; team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; @@ -2117,7 +2118,7 @@ static void team_setup(struct net_device *dev) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; + dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; dev->features |= dev->hw_features; } diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index c87c3a3453c1..623bb8ced060 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -220,7 +220,6 @@ enum { NETIF_F_GSO_GRE_CSUM | \ NETIF_F_GSO_IPXIP4 | \ NETIF_F_GSO_IPXIP6 | \ - NETIF_F_GSO_UDP_L4 | \ NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) -- cgit v1.2.3 From 7c6f97475da04ba3e80c62ca8cd271e34290a76e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 22 May 2018 17:18:09 +0100 Subject: net: vxge: fix spelling mistake in macro VXGE_HW_ERR_PRIVILAGED_OPEARATION Rename VXGE_HW_ERR_PRIVILAGED_OPEARATION to VXGE_HW_ERR_PRIVILEGED_OPERATION to fix spelling mistake. Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/neterion/vxge/vxge-config.c | 12 ++++++------ drivers/net/ethernet/neterion/vxge/vxge-config.h | 2 +- drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 2 +- drivers/net/ethernet/neterion/vxge/vxge-main.c | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 6223930a8155..c60da9e8bf14 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -693,7 +693,7 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id) VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) return VXGE_HW_OK; else - return VXGE_HW_ERR_PRIVILAGED_OPEARATION; + return VXGE_HW_ERR_PRIVILEGED_OPERATION; } /* @@ -1920,7 +1920,7 @@ enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev, } if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; goto exit; } @@ -3153,7 +3153,7 @@ vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev, case vxge_hw_mgmt_reg_type_mrpcim: if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; break; } if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { @@ -3165,7 +3165,7 @@ vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev, case vxge_hw_mgmt_reg_type_srpcim: if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; break; } if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { @@ -3279,7 +3279,7 @@ vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev, case vxge_hw_mgmt_reg_type_mrpcim: if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; break; } if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { @@ -3291,7 +3291,7 @@ vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev, case vxge_hw_mgmt_reg_type_srpcim: if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; break; } if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index cfa970417f81..d743a37a3cee 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -127,7 +127,7 @@ enum vxge_hw_status { VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14, VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15, VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16, - VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17, + VXGE_HW_ERR_PRIVILEGED_OPERATION = VXGE_HW_BASE_ERR + 17, VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18, VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19, VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20, diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c index 0452848d1316..03c3d1230c17 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c @@ -276,7 +276,7 @@ static void vxge_get_ethtool_stats(struct net_device *dev, *ptr++ = 0; status = vxge_hw_device_xmac_stats_get(hldev, 
xmac_stats); if (status != VXGE_HW_OK) { - if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) { + if (status != VXGE_HW_ERR_PRIVILEGED_OPERATION) { vxge_debug_init(VXGE_ERR, "%s : %d Failure in getting xmac stats", __func__, __LINE__); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index b2299f2b2155..a8918bb7c802 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3484,11 +3484,11 @@ static int vxge_device_register(struct __vxge_hw_device *hldev, 0, &stat); - if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION) + if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION) vxge_debug_init( vxge_hw_device_trace_level_get(hldev), "%s: device stats clear returns" - "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name); + "VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name); vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev), "%s: %s:%d Exiting...", -- cgit v1.2.3 From 273de02ae7d828d72d66eb5fce181e1f3f96d0cd Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Tue, 22 May 2018 11:29:34 -0700 Subject: hv_netvsc: Add handlers for ethtool get/set msg level The handlers for ethtool get/set msg level are missing from netvsc. This patch adds them. Signed-off-by: Haiyang Zhang Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index da07ccdf84bf..60a5769ef5a1 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1618,8 +1618,24 @@ static int netvsc_set_ringparam(struct net_device *ndev, return ret; } +static u32 netvsc_get_msglevel(struct net_device *ndev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + + return ndev_ctx->msg_enable; +} + +static void netvsc_set_msglevel(struct net_device *ndev, u32 val) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + + ndev_ctx->msg_enable = val; +} + static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, + .get_msglevel = netvsc_get_msglevel, + .set_msglevel = netvsc_set_msglevel, .get_link = ethtool_op_get_link, .get_ethtool_stats = netvsc_get_ethtool_stats, .get_sset_count = netvsc_get_sset_count, -- cgit v1.2.3 From 038ed45bcec1c048ec8d9bc462cf0912f8a0eb52 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 23 May 2018 11:36:59 +0530 Subject: cxgb4: Add new T6 device ids Add 0x6088 and 0x6089 device ids for new T6 cards. Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index adacc6399131..c7f8d0441278 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -212,6 +212,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */ CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */ CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ -- cgit v1.2.3 From 64cf81675a1f64c1b311e4611dd3b6a961607612 Mon Sep 17 00:00:00 2001 From: Michal Vokáč Date: Wed, 23 May 2018 08:20:19 +0200 Subject: net: dsa: qca8k: Add support for QCA8334 switch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for the four-port variant of the Qualcomm QCA833x switch. Signed-off-by: Michal Vokáč Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 757b6d90ea36..eb7aceb20f8a 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -939,6 +939,7 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, qca8k_suspend, qca8k_resume); static const struct of_device_id qca8k_of_match[] = { + { .compatible = "qca,qca8334" }, { .compatible = "qca,qca8337" }, { /* sentinel */ }, }; -- cgit v1.2.3 From eee1fe64765c562d8bcaf95e5631a8ea2f760f34 Mon Sep 17 00:00:00 2001 From: Michal Vokáč Date: Wed, 23 May 2018 08:20:20 +0200 Subject: net: dsa: qca8k: Enable RXMAC when bringing up a port MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a port is brought up/down do not enable/disable only the TXMAC but the RXMAC as well. This is essential for the CPU port to work. Fixes: 6b93fb46480a ("net-next: dsa: add new driver for qca8xxx family") Signed-off-by: Michal Vokáč Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index eb7aceb20f8a..a32744e40baf 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -473,7 +473,7 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode) static void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) { - u32 mask = QCA8K_PORT_STATUS_TXMAC; + u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; /* Port 0 and 6 have no internal PHY */ if ((port > 0) && (port < 6)) -- cgit v1.2.3 From 79a4ed4f0f93fc65e48a0fc5247ffa5645f7b0cc Mon Sep 17 00:00:00 2001 From: Michal Vokáč Date: Wed, 23 May 2018 08:20:21 +0200 Subject: net: dsa: qca8k: Force CPU port to its highest bandwidth MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit By default autonegotiation is enabled to configure MAC on all ports. For the CPU port autonegotiation can not be used so we need to set some sensible defaults manually. 
This patch forces the default setting of the CPU port to 1000Mbps/full duplex which is the chip maximum capability. Also correct size of the bit field used to configure link speed. Fixes: 6b93fb46480a ("net-next: dsa: add new driver for qca8xxx family") Signed-off-by: Michal Vokáč Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 6 +++++- drivers/net/dsa/qca8k.h | 6 ++++-- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a32744e40baf..0d7e71528be5 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -490,6 +490,7 @@ qca8k_setup(struct dsa_switch *ds) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int ret, i, phy_mode = -1; + u32 mask; /* Make sure that port 0 is the cpu port */ if (!dsa_is_cpu_port(ds, 0)) { @@ -515,7 +516,10 @@ qca8k_setup(struct dsa_switch *ds) if (ret < 0) return ret; - /* Enable CPU Port */ + /* Enable CPU Port, force it to maximum bandwidth and full-duplex */ + mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW | + QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX; + qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask); qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1); diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 1cf8a920d4ff..5bda16555478 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -51,8 +51,10 @@ #define QCA8K_GOL_MAC_ADDR0 0x60 #define QCA8K_GOL_MAC_ADDR1 0x64 #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) -#define QCA8K_PORT_STATUS_SPEED GENMASK(2, 0) -#define QCA8K_PORT_STATUS_SPEED_S 0 +#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0) +#define QCA8K_PORT_STATUS_SPEED_10 0 +#define QCA8K_PORT_STATUS_SPEED_100 0x1 +#define QCA8K_PORT_STATUS_SPEED_1000 0x2 #define QCA8K_PORT_STATUS_TXMAC BIT(2) #define QCA8K_PORT_STATUS_RXMAC BIT(3) #define QCA8K_PORT_STATUS_TXFLOW BIT(4) -- cgit v1.2.3 From 9bb2289f90e671bdb78e306974187424ac19ff8e Mon Sep 17 00:00:00 2001 From: Michal Vokáč Date: Wed, 23 May 2018 08:20:22 +0200 Subject: net: dsa: qca8k: Allow overwriting CPU port setting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement adjust_link function that allows to overwrite default CPU port setting using fixed-link device tree subnode. Signed-off-by: Michal Vokáč Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 43 +++++++++++++++++++++++++++++++++++++++++++ drivers/net/dsa/qca8k.h | 1 + 2 files changed, 44 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 0d7e71528be5..8113399018d8 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -587,6 +587,47 @@ qca8k_setup(struct dsa_switch *ds) return 0; } +static void +qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) +{ + struct qca8k_priv *priv = ds->priv; + u32 reg; + + /* Force fixed-link setting for CPU port, skip others. 
*/ + if (!phy_is_pseudo_fixed_link(phy)) + return; + + /* Set port speed */ + switch (phy->speed) { + case 10: + reg = QCA8K_PORT_STATUS_SPEED_10; + break; + case 100: + reg = QCA8K_PORT_STATUS_SPEED_100; + break; + case 1000: + reg = QCA8K_PORT_STATUS_SPEED_1000; + break; + default: + dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n", + port, phy->speed); + return; + } + + /* Set duplex mode */ + if (phy->duplex == DUPLEX_FULL) + reg |= QCA8K_PORT_STATUS_DUPLEX; + + /* Force flow control */ + if (dsa_is_cpu_port(ds, port)) + reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW; + + /* Force link down before changing MAC options */ + qca8k_port_set_status(priv, port, 0); + qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); + qca8k_port_set_status(priv, port, 1); +} + static int qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) { @@ -841,6 +882,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port) static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, + .adjust_link = qca8k_adjust_link, .get_strings = qca8k_get_strings, .phy_read = qca8k_phy_read, .phy_write = qca8k_phy_write, @@ -872,6 +914,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) return -ENOMEM; priv->bus = mdiodev->bus; + priv->dev = &mdiodev->dev; /* read the switches ID register */ id = qca8k_read(priv, QCA8K_REG_MASK_CTRL); diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 5bda16555478..613fe5c50236 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -167,6 +167,7 @@ struct qca8k_priv { struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; struct dsa_switch *ds; struct mutex reg_mutex; + struct device *dev; }; struct qca8k_mib_desc { -- cgit v1.2.3 From 63a786a31033a32cb89498c9bb4e6069ebe9f8b0 Mon Sep 17 00:00:00 2001 From: Michal Vokáč Date: Wed, 23 May 2018 08:20:23 +0200 Subject: net: dsa: qca8k: Replace GPL boilerplate by SPDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the GPLv2 license boilerplate with the SPDX license identifier. Signed-off-by: Michal Vokáč Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 8113399018d8..46c178fb3fb9 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1,17 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2009 Felix Fietkau * Copyright (C) 2011-2012 Gabor Juhos * Copyright (c) 2015, The Linux Foundation. All rights reserved. * Copyright (c) 2016 John Crispin - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include -- cgit v1.2.3 From 38222b1aacf39c92e72d42a809609c725f2e9f83 Mon Sep 17 00:00:00 2001 From: Michal Vokáč Date: Wed, 23 May 2018 08:20:24 +0200 Subject: net: dsa: qca8k: Remove redundant parentheses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix warning reported by checkpatch. 
Signed-off-by: Michal Vokáč Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 46c178fb3fb9..cdcde7f8e0b2 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -468,7 +468,7 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; /* Port 0 and 6 have no internal PHY */ - if ((port > 0) && (port < 6)) + if (port > 0 && port < 6) mask |= QCA8K_PORT_STATUS_LINK_AUTO; if (enable) -- cgit v1.2.3 From 1d19023fa6f65de636901b9b7340f2f7eebb710a Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 23 May 2018 20:02:58 +0530 Subject: cxgb4: change the port capability bits definition MDI Port Capabilities bit definitions were inconsistent with regard to the MDI enum values. 2 bits used to define MDI in the port capabilities are not really separable, it's a 2-bit field with 4 different values. Change the port capability bit definitions to be "AUTO" and "STRAIGHT" in order to get them to line up with the enum's. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 8 ++++---- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 2 +- drivers/scsi/csiostor/csio_hw.c | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index df5e7c79223b..537ed072db4a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3941,8 +3941,8 @@ static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) CAP16_TO_CAP32(FC_RX); CAP16_TO_CAP32(FC_TX); CAP16_TO_CAP32(ANEG); - CAP16_TO_CAP32(MDIX); CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(MDISTRAIGHT); CAP16_TO_CAP32(FEC_RS); CAP16_TO_CAP32(FEC_BASER_RS); CAP16_TO_CAP32(802_3_PAUSE); @@ -3982,8 +3982,8 @@ static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) CAP32_TO_CAP16(802_3_PAUSE); CAP32_TO_CAP16(802_3_ASM_DIR); CAP32_TO_CAP16(ANEG); - CAP32_TO_CAP16(MDIX); CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(MDISTRAIGHT); CAP32_TO_CAP16(FEC_RS); CAP32_TO_CAP16(FEC_BASER_RS); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index e6b2e9549d56..2d91480a5a0e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2471,8 +2471,8 @@ enum fw_port_cap { FW_PORT_CAP_FC_RX = 0x0040, FW_PORT_CAP_FC_TX = 0x0080, FW_PORT_CAP_ANEG = 0x0100, - FW_PORT_CAP_MDIX = 0x0200, - FW_PORT_CAP_MDIAUTO = 0x0400, + FW_PORT_CAP_MDIAUTO = 0x0200, + FW_PORT_CAP_MDISTRAIGHT = 0x0400, FW_PORT_CAP_FEC_RS = 0x0800, FW_PORT_CAP_FEC_BASER_RS = 0x1000, FW_PORT_CAP_FEC_RESERVED = 0x2000, @@ -2515,8 +2515,8 @@ enum fw_port_mdi { #define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL #define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL #define FW_PORT_CAP32_ANEG 0x00100000UL -#define FW_PORT_CAP32_MDIX 0x00200000UL -#define FW_PORT_CAP32_MDIAUTO 0x00400000UL +#define FW_PORT_CAP32_MDIAUTO 0x00200000UL +#define FW_PORT_CAP32_MDISTRAIGHT 0x00400000UL #define FW_PORT_CAP32_FEC_RS 0x00800000UL #define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL #define FW_PORT_CAP32_FEC_RESERVED1 0x02000000UL 
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 798695bf8678..3017f7873ff9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -341,8 +341,8 @@ static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) CAP16_TO_CAP32(FC_RX); CAP16_TO_CAP32(FC_TX); CAP16_TO_CAP32(ANEG); - CAP16_TO_CAP32(MDIX); CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(MDISTRAIGHT); CAP16_TO_CAP32(FEC_RS); CAP16_TO_CAP32(FEC_BASER_RS); CAP16_TO_CAP32(802_3_PAUSE); diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 96bbb82c826d..a10cf25ee7f9 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -1500,8 +1500,8 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) CAP16_TO_CAP32(FC_RX); CAP16_TO_CAP32(FC_TX); CAP16_TO_CAP32(ANEG); - CAP16_TO_CAP32(MDIX); CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(MDISTRAIGHT); CAP16_TO_CAP32(FEC_RS); CAP16_TO_CAP32(FEC_BASER_RS); CAP16_TO_CAP32(802_3_PAUSE); -- cgit v1.2.3 From 8156b0ba7413238e473ef567ebecd755f92275c5 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 23 May 2018 20:03:33 +0530 Subject: cxgb4: do L1 config when module is inserted trigger an L1 configure operation when a transceiver module is inserted in order to cause current "sticky" options like Requested Forward Error Correction to be reapplied. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 26 ++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 11 +++++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 40 +++++++++++++++++++++---- 3 files changed, 65 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 211086bdbe20..0f305d97f4ee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -491,6 +491,9 @@ struct link_config { unsigned char link_ok; /* link up? 
*/ unsigned char link_down_rc; /* link down reason */ + + bool new_module; /* ->OS Transceiver Module inserted */ + bool redo_l1cfg; /* ->CC redo current "sticky" L1 CFG */ }; #define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) @@ -1324,7 +1327,7 @@ static inline unsigned int qtimer_val(const struct adapter *adap, extern char cxgb4_driver_name[]; extern const char cxgb4_driver_version[]; -void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_portmod_changed(struct adapter *adap, int port_id); void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); void t4_free_sge_resources(struct adapter *adap); @@ -1505,8 +1508,25 @@ void t4_intr_disable(struct adapter *adapter); int t4_slow_intr_handler(struct adapter *adapter); int t4_wait_dev_ready(void __iomem *regs); -int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, - struct link_config *lc); + +int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox, + unsigned int port, struct link_config *lc, + bool sleep_ok, int timeout); + +static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, + unsigned int port, struct link_config *lc) +{ + return t4_link_l1cfg_core(adapter, mbox, port, lc, + true, FW_CMD_MAX_TIMEOUT); +} + +static inline int t4_link_l1cfg_ns(struct adapter *adapter, unsigned int mbox, + unsigned int port, struct link_config *lc) +{ + return t4_link_l1cfg_core(adapter, mbox, port, lc, + false, FW_CMD_MAX_TIMEOUT); +} + int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); u32 t4_read_pcie_cfg4(struct adapter *adap, int reg); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 130d1eed7993..513e1d356384 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -301,14 +301,14 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) } } -void t4_os_portmod_changed(const struct adapter *adap, int port_id) +void t4_os_portmod_changed(struct adapter *adap, int port_id) { static const char *mod_str[] = { NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" }; - const struct net_device *dev = adap->port[port_id]; - const struct port_info *pi = netdev_priv(dev); + struct net_device *dev = adap->port[port_id]; + struct port_info *pi = netdev_priv(dev); if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) netdev_info(dev, "port module unplugged\n"); @@ -325,6 +325,11 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) else netdev_info(dev, "%s: unknown module type %d inserted\n", dev->name, pi->mod_type); + + /* If the interface is running, then we'll need any "sticky" Link + * Parameters redone with a new Transceiver Module. + */ + pi->link_cfg.redo_l1cfg = netif_running(dev); } int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 537ed072db4a..704f6960a6ea 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4058,14 +4058,16 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. 
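 * Editorial aside, not part of the original comment or patch: the change
 * below splits t4_link_l1cfg() into t4_link_l1cfg_core(), which takes
 * explicit sleep_ok and timeout arguments; the inline t4_link_l1cfg() and
 * t4_link_l1cfg_ns() wrappers added in cxgb4.h select the sleeping and
 * non-sleeping behaviour, the latter being issued from the port-info
 * handler when a new transceiver module is detected.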
*/ -int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, - unsigned int port, struct link_config *lc) +int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, + unsigned int port, struct link_config *lc, + bool sleep_ok, int timeout) { unsigned int fw_caps = adapter->params.fw_caps_support; - struct fw_port_cmd cmd; - unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO); fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; + struct fw_port_cmd cmd; + unsigned int fw_mdi; + fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps); /* Convert driver coding of Pause Frame Flow Control settings into the * Firmware's API. */ @@ -4087,7 +4089,7 @@ int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, /* Figure out what our Requested Port Capabilities are going to be. */ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { - rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec; + rcap = lc->acaps | fw_fc | fw_fec; lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; lc->fec = cc_fec; } else if (lc->autoneg == AUTONEG_DISABLE) { @@ -4113,7 +4115,8 @@ int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); else cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap); - return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + return t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL, + sleep_ok, timeout); } /** @@ -8335,6 +8338,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) fc = fwcap_to_cc_pause(linkattr); speed = fwcap_to_speed(linkattr); + lc->new_module = false; + lc->redo_l1cfg = false; + if (mod_type != pi->mod_type) { /* With the newer SFP28 and QSFP28 Transceiver Module Types, * various fundamental Port Capabilities which used to be @@ -8369,6 +8375,8 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) pi->port_type = port_type; pi->mod_type = mod_type; + + lc->new_module = t4_is_inserted_mod_type(mod_type); t4_os_portmod_changed(adapter, pi->port_id); } @@ -8401,6 +8409,26 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) t4_os_link_changed(adapter, pi->port_id, link_ok); } + + if (lc->new_module && lc->redo_l1cfg) { + struct link_config old_lc; + int ret; + + /* Save the current L1 Configuration and restore it if an + * error occurs. We probably should fix the l1_cfg*() + * routines not to change the link_config when an error + * occurs ... + */ + old_lc = *lc; + ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc); + if (ret) { + *lc = old_lc; + dev_warn(adapter->pdev_dev, + "Attempt to update new Transceiver Module settings failed\n"); + } + } + lc->new_module = false; + lc->redo_l1cfg = false; } /** -- cgit v1.2.3 From 6c2799c11e3681729f448706e6484cffad8a5633 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:38:11 -0500 Subject: amd-xgbe: Fix debug output of max channel counts A debug output print statement uses the wrong variable to output the maximum Rx channel count (cut and paste error, basically). Fix the statement to use the proper variable. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 82d1f416ee2a..7b63521a5f4d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -344,7 +344,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (netif_msg_probe(pdata)) { dev_dbg(dev, "max tx/rx channel count = %u/%u\n", pdata->tx_max_channel_count, - pdata->tx_max_channel_count); + pdata->rx_max_channel_count); dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n", pdata->tx_max_q_count, pdata->rx_max_q_count); } -- cgit v1.2.3 From b93c3ab6006b379f2df238693b6e131cba9b37b3 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:38:20 -0500 Subject: amd-xgbe: Read and save the port property registers during probe Read and save the port property registers once during the device probe and then use the saved values as they are needed. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 34 +++++++++++---- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 68 ++++++++++++----------------- drivers/net/ethernet/amd/xgbe/xgbe.h | 7 +++ 3 files changed, 62 insertions(+), 47 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 7b63521a5f4d..7b86240ecd5f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -335,12 +335,29 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pdata->awcr = XGBE_DMA_PCI_AWCR; pdata->awarcr = XGBE_DMA_PCI_AWARCR; + /* Read the port property registers */ + pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0); + pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1); + pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2); + pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3); + pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4); + if (netif_msg_probe(pdata)) { + dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0); + dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1); + dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2); + dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3); + dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4); + } + /* Set the maximum channels and queues */ - reg = XP_IOREAD(pdata, XP_PROP_1); - pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA); - pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA); - pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES); - pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES); + pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_TX_DMA); + pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_RX_DMA); + pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_TX_QUEUES); + pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_RX_QUEUES); if (netif_msg_probe(pdata)) { dev_dbg(dev, "max tx/rx channel count = %u/%u\n", pdata->tx_max_channel_count, @@ -353,12 +370,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) xgbe_set_counts(pdata); /* Set the maximum fifo amounts */ - reg = XP_IOREAD(pdata, XP_PROP_2); - pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE); + pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, + TX_FIFO_SIZE); 
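	/* Editorial aside, not part of the patch: the TX/RX_FIFO_SIZE fields read
	 * here appear to be encoded in 16 KB units, hence the scaling by 16384
	 * (and the clamping against the per-version maximums) on the lines below.
	 */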
pdata->tx_max_fifo_size *= 16384; pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size, pdata->vdata->tx_max_fifo_size); - pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE); + pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, + RX_FIFO_SIZE); pdata->rx_max_fifo_size *= 16384; pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size, pdata->vdata->rx_max_fifo_size); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index aac884314000..123ceb0540d7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -2421,22 +2421,21 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int reg; - - reg = XP_IOREAD(pdata, XP_PROP_3); phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 + - XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR); + XP_GET_BITS(pdata->pp3, XP_PROP_3, + GPIO_ADDR); - phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK); + phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3, + GPIO_MASK); - phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3, + phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_RX_LOS); - phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3, + phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_TX_FAULT); - phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3, + phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_MOD_ABS); - phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3, + phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_RATE_SELECT); if (netif_msg_probe(pdata)) { @@ -2458,18 +2457,17 @@ static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata) static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int reg, mux_addr_hi, mux_addr_lo; + unsigned int mux_addr_hi, mux_addr_lo; - reg = XP_IOREAD(pdata, XP_PROP_4); - - mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI); - mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO); + mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI); + mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO); if (mux_addr_lo == XGBE_SFP_DIRECT) return; phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545; phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo; - phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN); + phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4, + MUX_CHAN); if (netif_msg_probe(pdata)) { dev_dbg(pdata->dev, "SFP: mux_address=%#x\n", @@ -2592,13 +2590,11 @@ static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data) static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int reg; if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) return 0; - reg = XP_IOREAD(pdata, XP_PROP_3); - phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET); + phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET); switch (phy_data->mdio_reset) { case XGBE_MDIO_RESET_NONE: case XGBE_MDIO_RESET_I2C_GPIO: @@ -2612,12 +2608,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) { phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 + - XP_GET_BITS(reg, XP_PROP_3, + 
XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_ADDR); - phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_GPIO); } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) { - phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_INT_GPIO); } @@ -2707,12 +2703,9 @@ static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata) static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) { - unsigned int reg; - - reg = XP_IOREAD(pdata, XP_PROP_0); - if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS)) + if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS)) return false; - if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE)) + if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE)) return false; return true; @@ -2921,7 +2914,6 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data; struct mii_bus *mii; - unsigned int reg; int ret; /* Check if enabled */ @@ -2940,12 +2932,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) return -ENOMEM; pdata->phy_data = phy_data; - reg = XP_IOREAD(pdata, XP_PROP_0); - phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE); - phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID); - phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS); - phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE); - phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR); + phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE); + phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID); + phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS); + phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE); + phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR); if (netif_msg_probe(pdata)) { dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode); dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id); @@ -2954,12 +2945,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr); } - reg = XP_IOREAD(pdata, XP_PROP_4); - phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT); - phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF); - phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR); - phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE); - phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL); + phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT); + phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF); + phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR); + phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE); + phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL); if (phy_data->redrv && netif_msg_probe(pdata)) { dev_dbg(pdata->dev, "redrv present\n"); dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 95d4b56448c6..54e43ad32d26 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -1027,6 +1027,13 @@ struct xgbe_prv_data { void __iomem *xprop_regs; /* XGBE property registers */ void __iomem *xi2c_regs; /* XGBE I2C CSRs */ + /* Port property registers */ + unsigned int pp0; + unsigned int pp1; + unsigned int 
pp2; + unsigned int pp3; + unsigned int pp4; + /* Overall device lock */ spinlock_t lock; -- cgit v1.2.3 From 0d2b5255ea7b69f2270e41130ee25346b1f68b7b Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:38:29 -0500 Subject: amd-xgbe: Remove use of comm_owned field The comm_owned field can hide logic where double locking is attempted and prevent multiple threads for the same device from accessing the mutex properly. Remove the comm_owned field and use the mutex API exclusively for gaining ownership. The current driver has been audited and is obtaining communications ownership properly. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 123ceb0540d7..05003be89ff5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -327,8 +327,6 @@ struct xgbe_phy_data { unsigned int mdio_addr; - unsigned int comm_owned; - /* SFP Support */ enum xgbe_sfp_comm sfp_comm; unsigned int sfp_mux_address; @@ -382,12 +380,6 @@ static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata); static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *i2c_op) { - struct xgbe_phy_data *phy_data = pdata->phy_data; - - /* Be sure we own the bus */ - if (WARN_ON(!phy_data->comm_owned)) - return -EIO; - return pdata->i2c_if.i2c_xfer(pdata, i2c_op); } @@ -549,10 +541,6 @@ static int xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata) static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata) { - struct xgbe_phy_data *phy_data = pdata->phy_data; - - phy_data->comm_owned = 0; - mutex_unlock(&xgbe_phy_comm_lock); } @@ -562,9 +550,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata) unsigned long timeout; unsigned int mutex_id; - if (phy_data->comm_owned) - return 0; - /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices, * the driver needs to take the software mutex and then the hardware * mutexes before being able to use the busses. @@ -593,7 +578,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata) XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id); XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id); - phy_data->comm_owned = 1; return 0; } -- cgit v1.2.3 From 67cea0c92264a657230ba14e1956c34132ca6161 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:38:38 -0500 Subject: amd-xgbe: Remove field that indicates SFP diagnostic support The driver currently sets an indication of whether the SFP supports, and that the driver can obtain, diagnostics data. This isn't currently used by the driver and the logic to set this indicator is flawed because the field is cleared each time the SFP is checked and only set when a new SFP is detected. Remove this field and the logic supporting it. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 05003be89ff5..cb15cafd5209 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -343,7 +343,6 @@ struct xgbe_phy_data { unsigned int sfp_rx_los; unsigned int sfp_tx_fault; unsigned int sfp_mod_absent; - unsigned int sfp_diags; unsigned int sfp_changed; unsigned int sfp_phy_avail; unsigned int sfp_cable_len; @@ -1211,13 +1210,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata) memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom)); - if (sfp_eeprom.extd[XGBE_SFP_EXTD_SFF_8472]) { - u8 diag_type = sfp_eeprom.extd[XGBE_SFP_EXTD_DIAG]; - - if (!(diag_type & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) - phy_data->sfp_diags = 1; - } - xgbe_phy_free_phy_device(pdata); } else { phy_data->sfp_changed = 0; @@ -1267,7 +1259,6 @@ static void xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data) phy_data->sfp_rx_los = 0; phy_data->sfp_tx_fault = 0; phy_data->sfp_mod_absent = 1; - phy_data->sfp_diags = 0; phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN; phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN; phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN; -- cgit v1.2.3 From 53a1024abf3e4cb3310a54b08c48323649158c13 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:38:46 -0500 Subject: amd-xgbe: Add ethtool support to retrieve SFP module info Add support to get SFP module information using ethtool. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 18 ++++ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 21 ++++ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 137 +++++++++++++++++++++++++++ drivers/net/ethernet/amd/xgbe/xgbe.h | 13 +++ 4 files changed, 189 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index ff397bb25042..57394b77b2d3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -626,6 +626,22 @@ static int xgbe_get_ts_info(struct net_device *netdev, return 0; } +static int xgbe_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + return pdata->phy_if.module_info(pdata, modinfo); +} + +static int xgbe_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + return pdata->phy_if.module_eeprom(pdata, eeprom, data); +} + static const struct ethtool_ops xgbe_ethtool_ops = { .get_drvinfo = xgbe_get_drvinfo, .get_msglevel = xgbe_get_msglevel, @@ -646,6 +662,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = { .get_ts_info = xgbe_get_ts_info, .get_link_ksettings = xgbe_get_link_ksettings, .set_link_ksettings = xgbe_set_link_ksettings, + .get_module_info = xgbe_get_module_info, + .get_module_eeprom = xgbe_get_module_eeprom, }; const struct ethtool_ops *xgbe_get_ethtool_ops(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 1b45cd73a258..9c39c72d25ab 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -126,6 +126,24 @@ #include "xgbe.h" #include "xgbe-common.h" +static int xgbe_phy_module_eeprom(struct 
xgbe_prv_data *pdata, + struct ethtool_eeprom *eeprom, u8 *data) +{ + if (!pdata->phy_if.phy_impl.module_eeprom) + return -ENXIO; + + return pdata->phy_if.phy_impl.module_eeprom(pdata, eeprom, data); +} + +static int xgbe_phy_module_info(struct xgbe_prv_data *pdata, + struct ethtool_modinfo *modinfo) +{ + if (!pdata->phy_if.phy_impl.module_info) + return -ENXIO; + + return pdata->phy_if.phy_impl.module_info(pdata, modinfo); +} + static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata) { int reg; @@ -1639,4 +1657,7 @@ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if) phy_if->phy_valid_speed = xgbe_phy_valid_speed; phy_if->an_isr = xgbe_an_combined_isr; + + phy_if->module_info = xgbe_phy_module_info; + phy_if->module_eeprom = xgbe_phy_module_eeprom; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index cb15cafd5209..141bb13dda26 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -119,6 +119,7 @@ #include #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -270,6 +271,15 @@ struct xgbe_sfp_eeprom { u8 vendor[32]; }; +#define XGBE_SFP_DIAGS_SUPPORTED(_x) \ + ((_x)->extd[XGBE_SFP_EXTD_SFF_8472] && \ + !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) + +#define XGBE_SFP_EEPROM_BASE_LEN 256 +#define XGBE_SFP_EEPROM_DIAG_LEN 256 +#define XGBE_SFP_EEPROM_MAX (XGBE_SFP_EEPROM_BASE_LEN + \ + XGBE_SFP_EEPROM_DIAG_LEN) + #define XGBE_BEL_FUSE_VENDOR "BEL-FUSE " #define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 " @@ -1301,6 +1311,130 @@ put: xgbe_phy_put_comm_ownership(pdata); } +static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + u8 eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX]; + struct xgbe_sfp_eeprom *sfp_eeprom; + unsigned int i, j, rem; + int ret; + + rem = eeprom->len; + + if (!eeprom->len) { + ret = -EINVAL; + goto done; + } + + if ((eeprom->offset + eeprom->len) > XGBE_SFP_EEPROM_MAX) { + ret = -EINVAL; + goto done; + } + + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) { + ret = -ENXIO; + goto done; + } + + if (!netif_running(pdata->netdev)) { + ret = -EIO; + goto done; + } + + if (phy_data->sfp_mod_absent) { + ret = -EIO; + goto done; + } + + ret = xgbe_phy_get_comm_ownership(pdata); + if (ret) { + ret = -EIO; + goto done; + } + + ret = xgbe_phy_sfp_get_mux(pdata); + if (ret) { + netdev_err(pdata->netdev, "I2C error setting SFP MUX\n"); + ret = -EIO; + goto put_own; + } + + /* Read the SFP serial ID eeprom */ + eeprom_addr = 0; + ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS, + &eeprom_addr, sizeof(eeprom_addr), + eeprom_data, XGBE_SFP_EEPROM_BASE_LEN); + if (ret) { + netdev_err(pdata->netdev, + "I2C error reading SFP EEPROM\n"); + ret = -EIO; + goto put_mux; + } + + sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data; + + if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) { + /* Read the SFP diagnostic eeprom */ + eeprom_addr = 0; + ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS, + &eeprom_addr, sizeof(eeprom_addr), + eeprom_data + XGBE_SFP_EEPROM_BASE_LEN, + XGBE_SFP_EEPROM_DIAG_LEN); + if (ret) { + netdev_err(pdata->netdev, + "I2C error reading SFP DIAGS\n"); + ret = -EIO; + goto put_mux; + } + } + + for (i = 0, j = eeprom->offset; i < eeprom->len; i++, j++) { + if ((j >= XGBE_SFP_EEPROM_BASE_LEN) && + !XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) + break; + + data[i] = eeprom_data[j]; + rem--; + } + 
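	/* Editorial aside, not part of the patch: 'rem' tracks how many of the
	 * requested bytes were not copied (the loop above stops early when the
	 * request crosses into the diagnostics page of an SFP that does not
	 * provide one); the 'done' path below subtracts it so eeprom->len
	 * reports only the bytes actually returned to ethtool.
	 */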
+put_mux: + xgbe_phy_sfp_put_mux(pdata); + +put_own: + xgbe_phy_put_comm_ownership(pdata); + +done: + eeprom->len -= rem; + + return ret; +} + +static int xgbe_phy_module_info(struct xgbe_prv_data *pdata, + struct ethtool_modinfo *modinfo) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) + return -ENXIO; + + if (!netif_running(pdata->netdev)) + return -EIO; + + if (phy_data->sfp_mod_absent) + return -EIO; + + if (XGBE_SFP_DIAGS_SUPPORTED(&phy_data->sfp_eeprom)) { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } + + return 0; +} + static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) { struct ethtool_link_ksettings *lks = &pdata->phy.lks; @@ -3196,4 +3330,7 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; phy_impl->kr_training_post = xgbe_phy_kr_training_post; + + phy_impl->module_info = xgbe_phy_module_info; + phy_impl->module_eeprom = xgbe_phy_module_eeprom; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 54e43ad32d26..f0f455b79ff9 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -835,6 +835,7 @@ struct xgbe_hw_if { * Optional routines: * an_pre, an_post * kr_training_pre, kr_training_post + * module_info, module_eeprom */ struct xgbe_phy_impl_if { /* Perform Setup/teardown actions */ @@ -883,6 +884,12 @@ struct xgbe_phy_impl_if { /* Pre/Post KR training enablement support */ void (*kr_training_pre)(struct xgbe_prv_data *); void (*kr_training_post)(struct xgbe_prv_data *); + + /* SFP module related info */ + int (*module_info)(struct xgbe_prv_data *pdata, + struct ethtool_modinfo *modinfo); + int (*module_eeprom)(struct xgbe_prv_data *pdata, + struct ethtool_eeprom *eeprom, u8 *data); }; struct xgbe_phy_if { @@ -905,6 +912,12 @@ struct xgbe_phy_if { /* For single interrupt support */ irqreturn_t (*an_isr)(struct xgbe_prv_data *); + /* For ethtool PHY support */ + int (*module_info)(struct xgbe_prv_data *pdata, + struct ethtool_modinfo *modinfo); + int (*module_eeprom)(struct xgbe_prv_data *pdata, + struct ethtool_eeprom *eeprom, u8 *data); + /* PHY implementation specific services */ struct xgbe_phy_impl_if phy_impl; }; -- cgit v1.2.3 From bab748de986d786cbbef31d550bea3bc616304cb Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:38:56 -0500 Subject: amd-xgbe: Add ethtool show/set ring parameter support Add ethtool support to show and set the number of the Rx and Tx ring descriptors. Changing the ring configuration will result in a device restart. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6 +-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 65 ++++++++++++++++++++++++++++ drivers/net/ethernet/amd/xgbe/xgbe.h | 6 +++ 3 files changed, 72 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 7c204f05b418..2646c088452c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1426,10 +1426,8 @@ static void xgbe_stopdev(struct work_struct *work) netdev_alert(pdata->netdev, "device stopped\n"); } -static void xgbe_restart_dev(struct xgbe_prv_data *pdata) +void xgbe_restart_dev(struct xgbe_prv_data *pdata) { - DBGPR("-->xgbe_restart_dev\n"); - /* If not running, "restart" will happen on open */ if (!netif_running(pdata->netdev)) return; @@ -1440,8 +1438,6 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata) xgbe_free_rx_data(pdata); xgbe_start(pdata); - - DBGPR("<--xgbe_restart_dev\n"); } static void xgbe_restart(struct work_struct *work) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 57394b77b2d3..d12f982d73b4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -642,6 +642,69 @@ static int xgbe_get_module_eeprom(struct net_device *netdev, return pdata->phy_if.module_eeprom(pdata, eeprom, data); } +static void xgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX; + ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX; + ringparam->rx_pending = pdata->rx_desc_count; + ringparam->tx_pending = pdata->tx_desc_count; +} + +static int xgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int rx, tx; + + if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) { + netdev_err(netdev, "unsupported ring parameter\n"); + return -EINVAL; + } + + if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) || + (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) { + netdev_err(netdev, + "rx ring parameter must be between %u and %u\n", + XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX); + return -EINVAL; + } + + if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) || + (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) { + netdev_err(netdev, + "tx ring parameter must be between %u and %u\n", + XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX); + return -EINVAL; + } + + rx = __rounddown_pow_of_two(ringparam->rx_pending); + if (rx != ringparam->rx_pending) + netdev_notice(netdev, + "rx ring parameter rounded to power of two: %u\n", + rx); + + tx = __rounddown_pow_of_two(ringparam->tx_pending); + if (tx != ringparam->tx_pending) + netdev_notice(netdev, + "tx ring parameter rounded to power of two: %u\n", + tx); + + if ((rx == pdata->rx_desc_count) && + (tx == pdata->tx_desc_count)) + goto out; + + pdata->rx_desc_count = rx; + pdata->tx_desc_count = tx; + + xgbe_restart_dev(pdata); + +out: + return 0; +} + static const struct ethtool_ops xgbe_ethtool_ops = { .get_drvinfo = xgbe_get_drvinfo, .get_msglevel = xgbe_get_msglevel, @@ -664,6 +727,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = { .set_link_ksettings = xgbe_set_link_ksettings, .get_module_info = xgbe_get_module_info, .get_module_eeprom = xgbe_get_module_eeprom, + .get_ringparam = 
xgbe_get_ringparam, + .set_ringparam = xgbe_set_ringparam, }; const struct ethtool_ops *xgbe_get_ethtool_ops(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index f0f455b79ff9..7dc0fac560a3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -144,6 +144,11 @@ #define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1) #define XGBE_RX_DESC_CNT 512 +#define XGBE_TX_DESC_CNT_MIN 64 +#define XGBE_TX_DESC_CNT_MAX 4096 +#define XGBE_RX_DESC_CNT_MIN 64 +#define XGBE_RX_DESC_CNT_MAX 4096 + #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) /* Descriptors required for maximum contiguous TSO/GSO packet */ @@ -1330,6 +1335,7 @@ int xgbe_powerup(struct net_device *, unsigned int); int xgbe_powerdown(struct net_device *, unsigned int); void xgbe_init_rx_coalesce(struct xgbe_prv_data *); void xgbe_init_tx_coalesce(struct xgbe_prv_data *); +void xgbe_restart_dev(struct xgbe_prv_data *pdata); #ifdef CONFIG_DEBUG_FS void xgbe_debugfs_init(struct xgbe_prv_data *); -- cgit v1.2.3 From 2244753409f5bc3e2eae4e2ec6f4ced239993f33 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:39:04 -0500 Subject: amd-xgbe: Prepare for ethtool set-channel support In order to support being able to dynamically set/change the number of Rx and Tx channels, update the code to: - Move alloc and free of device memory into callable functions - Move setting of the real number of Rx and Tx channels to device startup - Move mapping of the RSS channels to device startup Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 108 ++++++++++++++++++------------ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 20 +----- 2 files changed, 68 insertions(+), 60 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 2646c088452c..397e3a0e1d67 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1312,14 +1312,72 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller) return 0; } +static void xgbe_free_memory(struct xgbe_prv_data *pdata) +{ + struct xgbe_desc_if *desc_if = &pdata->desc_if; + + /* Free the ring descriptors and buffers */ + desc_if->free_ring_resources(pdata); + + /* Free the channel and ring structures */ + xgbe_free_channels(pdata); +} + +static int xgbe_alloc_memory(struct xgbe_prv_data *pdata) +{ + struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct net_device *netdev = pdata->netdev; + int ret; + + /* Calculate the Rx buffer size before allocating rings */ + pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu); + + /* Allocate the channel and ring structures */ + ret = xgbe_alloc_channels(pdata); + if (ret) + return ret; + + /* Allocate the ring descriptors and buffers */ + ret = desc_if->alloc_ring_resources(pdata); + if (ret) + goto err_channels; + + /* Initialize the service and Tx timers */ + xgbe_init_timers(pdata); + + return 0; + +err_channels: + xgbe_free_memory(pdata); + + return ret; +} + static int xgbe_start(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_phy_if *phy_if = &pdata->phy_if; struct net_device *netdev = pdata->netdev; + unsigned int i; int ret; - DBGPR("-->xgbe_start\n"); + /* Set the number of queues */ + ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); + if (ret) { + netdev_err(netdev, "error setting real tx queue count\n"); + return ret; + 
} + + ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); + if (ret) { + netdev_err(netdev, "error setting real rx queue count\n"); + return ret; + } + + /* Set RSS lookup table data for programming */ + for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, + i % pdata->rx_ring_count); ret = hw_if->init(pdata); if (ret) @@ -1347,8 +1405,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata) clear_bit(XGBE_STOPPED, &pdata->dev_state); - DBGPR("<--xgbe_start\n"); - return 0; err_irqs: @@ -1823,11 +1879,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, static int xgbe_open(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_desc_if *desc_if = &pdata->desc_if; int ret; - DBGPR("-->xgbe_open\n"); - /* Create the various names based on netdev name */ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", netdev_name(netdev)); @@ -1872,43 +1925,25 @@ static int xgbe_open(struct net_device *netdev) goto err_sysclk; } - /* Calculate the Rx buffer size before allocating rings */ - ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu); - if (ret < 0) - goto err_ptpclk; - pdata->rx_buf_size = ret; - - /* Allocate the channel and ring structures */ - ret = xgbe_alloc_channels(pdata); - if (ret) - goto err_ptpclk; - - /* Allocate the ring descriptors and buffers */ - ret = desc_if->alloc_ring_resources(pdata); - if (ret) - goto err_channels; - INIT_WORK(&pdata->service_work, xgbe_service); INIT_WORK(&pdata->restart_work, xgbe_restart); INIT_WORK(&pdata->stopdev_work, xgbe_stopdev); INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); - xgbe_init_timers(pdata); + + ret = xgbe_alloc_memory(pdata); + if (ret) + goto err_ptpclk; ret = xgbe_start(pdata); if (ret) - goto err_rings; + goto err_mem; clear_bit(XGBE_DOWN, &pdata->dev_state); - DBGPR("<--xgbe_open\n"); - return 0; -err_rings: - desc_if->free_ring_resources(pdata); - -err_channels: - xgbe_free_channels(pdata); +err_mem: + xgbe_free_memory(pdata); err_ptpclk: clk_disable_unprepare(pdata->ptpclk); @@ -1928,18 +1963,11 @@ err_dev_wq: static int xgbe_close(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_desc_if *desc_if = &pdata->desc_if; - - DBGPR("-->xgbe_close\n"); /* Stop the device */ xgbe_stop(pdata); - /* Free the ring descriptors and buffers */ - desc_if->free_ring_resources(pdata); - - /* Free the channel and ring structures */ - xgbe_free_channels(pdata); + xgbe_free_memory(pdata); /* Disable the clocks */ clk_disable_unprepare(pdata->ptpclk); @@ -1953,8 +1981,6 @@ static int xgbe_close(struct net_device *netdev) set_bit(XGBE_DOWN, &pdata->dev_state); - DBGPR("<--xgbe_close\n"); - return 0; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 441d0973957b..b41f23679a08 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -265,7 +265,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) { struct net_device *netdev = pdata->netdev; struct device *dev = pdata->dev; - unsigned int i; int ret; netdev->irq = pdata->dev_irq; @@ -324,26 +323,9 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) pdata->tx_ring_count, pdata->rx_ring_count); } - /* Set the number of queues */ - ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); - if (ret) { - dev_err(dev, "error setting real tx queue count\n"); - return ret; - } - - ret = netif_set_real_num_rx_queues(netdev, 
pdata->rx_ring_count); - if (ret) { - dev_err(dev, "error setting real rx queue count\n"); - return ret; - } - - /* Initialize RSS hash key and lookup table */ + /* Initialize RSS hash key */ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); - for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) - XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, - i % pdata->rx_ring_count); - XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); -- cgit v1.2.3 From 01b5277fc9984d9fb9156afa0b1be70b3b475825 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:39:13 -0500 Subject: amd-xgbe: Add ethtool show/set channels support Add ethtool support to show and set the device channel configuration. Changing the channel configuration will result in a device restart. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 25 +++++ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 134 +++++++++++++++++++++++++++ drivers/net/ethernet/amd/xgbe/xgbe.h | 4 + 3 files changed, 163 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 397e3a0e1d67..24f1053b8785 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1329,6 +1329,17 @@ static int xgbe_alloc_memory(struct xgbe_prv_data *pdata) struct net_device *netdev = pdata->netdev; int ret; + if (pdata->new_tx_ring_count) { + pdata->tx_ring_count = pdata->new_tx_ring_count; + pdata->tx_q_count = pdata->tx_ring_count; + pdata->new_tx_ring_count = 0; + } + + if (pdata->new_rx_ring_count) { + pdata->rx_ring_count = pdata->new_rx_ring_count; + pdata->new_rx_ring_count = 0; + } + /* Calculate the Rx buffer size before allocating rings */ pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu); @@ -1482,6 +1493,20 @@ static void xgbe_stopdev(struct work_struct *work) netdev_alert(pdata->netdev, "device stopped\n"); } +void xgbe_full_restart_dev(struct xgbe_prv_data *pdata) +{ + /* If not running, "restart" will happen on open */ + if (!netif_running(pdata->netdev)) + return; + + xgbe_stop(pdata); + + xgbe_free_memory(pdata); + xgbe_alloc_memory(pdata); + + xgbe_start(pdata); +} + void xgbe_restart_dev(struct xgbe_prv_data *pdata) { /* If not running, "restart" will happen on open */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index d12f982d73b4..a880f10e3e70 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -705,6 +705,138 @@ out: return 0; } +static void xgbe_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int rx, tx, combined; + + /* Calculate maximums allowed: + * - Take into account the number of available IRQs + * - Do not take into account the number of online CPUs so that + * the user can over-subscribe if desired + * - Tx is additionally limited by the number of hardware queues + */ + rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count); + rx = min(rx, pdata->channel_irq_count); + tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count); + tx = min(tx, pdata->channel_irq_count); + tx = min(tx, pdata->tx_max_q_count); + + combined = min(rx, tx); + + channels->max_combined = combined; + channels->max_rx = 
rx ? rx - 1 : 0; + channels->max_tx = tx ? tx - 1 : 0; + + /* Get current settings based on device state */ + rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count; + tx = pdata->new_tx_ring_count ? : pdata->tx_ring_count; + + combined = min(rx, tx); + rx -= combined; + tx -= combined; + + channels->combined_count = combined; + channels->rx_count = rx; + channels->tx_count = tx; +} + +static void xgbe_print_set_channels_input(struct net_device *netdev, + struct ethtool_channels *channels) +{ + netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n", + channels->combined_count, channels->rx_count, + channels->tx_count); +} + +static int xgbe_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int rx, rx_curr, tx, tx_curr, combined; + + /* Calculate maximums allowed: + * - Take into account the number of available IRQs + * - Do not take into account the number of online CPUs so that + * the user can over-subscribe if desired + * - Tx is additionally limited by the number of hardware queues + */ + rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count); + rx = min(rx, pdata->channel_irq_count); + tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count); + tx = min(tx, pdata->tx_max_q_count); + tx = min(tx, pdata->channel_irq_count); + + combined = min(rx, tx); + + /* Should not be setting other count */ + if (channels->other_count) { + netdev_err(netdev, + "other channel count must be zero\n"); + return -EINVAL; + } + + /* Require at least one Combined (Rx and Tx) channel */ + if (!channels->combined_count) { + netdev_err(netdev, + "at least one combined Rx/Tx channel is required\n"); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + /* Check combined channels */ + if (channels->combined_count > combined) { + netdev_err(netdev, + "combined channel count cannot exceed %u\n", + combined); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + /* Can have some Rx-only or Tx-only channels, but not both */ + if (channels->rx_count && channels->tx_count) { + netdev_err(netdev, + "cannot specify both Rx-only and Tx-only channels\n"); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + /* Check that we don't exceed the maximum number of channels */ + if ((channels->combined_count + channels->rx_count) > rx) { + netdev_err(netdev, + "total Rx channels (%u) requested exceeds maximum available (%u)\n", + channels->combined_count + channels->rx_count, rx); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + if ((channels->combined_count + channels->tx_count) > tx) { + netdev_err(netdev, + "total Tx channels (%u) requested exceeds maximum available (%u)\n", + channels->combined_count + channels->tx_count, tx); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + rx = channels->combined_count + channels->rx_count; + tx = channels->combined_count + channels->tx_count; + + rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count; + tx_curr = pdata->new_tx_ring_count ? 
: pdata->tx_ring_count; + + if ((rx == rx_curr) && (tx == tx_curr)) + goto out; + + pdata->new_rx_ring_count = rx; + pdata->new_tx_ring_count = tx; + + xgbe_full_restart_dev(pdata); + +out: + return 0; +} + static const struct ethtool_ops xgbe_ethtool_ops = { .get_drvinfo = xgbe_get_drvinfo, .get_msglevel = xgbe_get_msglevel, @@ -729,6 +861,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = { .get_module_eeprom = xgbe_get_module_eeprom, .get_ringparam = xgbe_get_ringparam, .set_ringparam = xgbe_set_ringparam, + .get_channels = xgbe_get_channels, + .set_channels = xgbe_set_channels, }; const struct ethtool_ops *xgbe_get_ethtool_ops(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 7dc0fac560a3..7a412cfa8e40 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -1122,6 +1122,9 @@ struct xgbe_prv_data { unsigned int rx_ring_count; unsigned int rx_desc_count; + unsigned int new_tx_ring_count; + unsigned int new_rx_ring_count; + unsigned int tx_max_q_count; unsigned int rx_max_q_count; unsigned int tx_q_count; @@ -1336,6 +1339,7 @@ int xgbe_powerdown(struct net_device *, unsigned int); void xgbe_init_rx_coalesce(struct xgbe_prv_data *); void xgbe_init_tx_coalesce(struct xgbe_prv_data *); void xgbe_restart_dev(struct xgbe_prv_data *pdata); +void xgbe_full_restart_dev(struct xgbe_prv_data *pdata); #ifdef CONFIG_DEBUG_FS void xgbe_debugfs_init(struct xgbe_prv_data *); -- cgit v1.2.3 From eca282b8418b0b5a55b70e42f684e882d3fb9654 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:39:21 -0500 Subject: amd-xgbe: Always attempt link training in KR mode Link training is always attempted when in KR mode, but the code is structured to check if link training has been enabled before attempting to perform it. Since that check will always be true, simplify the code to always enable and start link training during KR auto-negotiation. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 69 +++++++------------------------ 1 file changed, 16 insertions(+), 53 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 9c39c72d25ab..450b89cd9eeb 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -216,31 +216,8 @@ static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata) xgbe_an37_clear_interrupts(pdata); } -static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata) -{ - unsigned int reg; - - reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); - - reg |= XGBE_KR_TRAINING_ENABLE; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); -} - -static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata) -{ - unsigned int reg; - - reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); - - reg &= ~XGBE_KR_TRAINING_ENABLE; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); -} - static void xgbe_kr_mode(struct xgbe_prv_data *pdata) { - /* Enable KR training */ - xgbe_an73_enable_kr_training(pdata); - /* Set MAC to 10G speed */ pdata->hw_if.set_speed(pdata, SPEED_10000); @@ -250,9 +227,6 @@ static void xgbe_kr_mode(struct xgbe_prv_data *pdata) static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 2.5G speed */ pdata->hw_if.set_speed(pdata, SPEED_2500); @@ -262,9 +236,6 @@ static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata) static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); @@ -278,9 +249,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata) if (pdata->kr_redrv) return xgbe_kr_mode(pdata); - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 10G speed */ pdata->hw_if.set_speed(pdata, SPEED_10000); @@ -290,9 +258,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata) static void xgbe_x_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); @@ -302,9 +267,6 @@ static void xgbe_x_mode(struct xgbe_prv_data *pdata) static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); @@ -314,9 +276,6 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); @@ -425,6 +384,12 @@ static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable, { unsigned int reg; + /* Disable KR training for now */ + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + reg &= ~XGBE_KR_TRAINING_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); + + /* Update AN settings */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); reg &= ~MDIO_AN_CTRL1_ENABLE; @@ -522,21 +487,19 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); /* Start KR training */ - reg = 
XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); - if (reg & XGBE_KR_TRAINING_ENABLE) { - if (pdata->phy_if.phy_impl.kr_training_pre) - pdata->phy_if.phy_impl.kr_training_pre(pdata); + if (pdata->phy_if.phy_impl.kr_training_pre) + pdata->phy_if.phy_impl.kr_training_pre(pdata); - reg |= XGBE_KR_TRAINING_START; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, - reg); + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + reg |= XGBE_KR_TRAINING_ENABLE; + reg |= XGBE_KR_TRAINING_START; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); - netif_dbg(pdata, link, pdata->netdev, - "KR training initiated\n"); + netif_dbg(pdata, link, pdata->netdev, + "KR training initiated\n"); - if (pdata->phy_if.phy_impl.kr_training_post) - pdata->phy_if.phy_impl.kr_training_post(pdata); - } + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); return XGBE_AN_PAGE_RECEIVED; } -- cgit v1.2.3 From 418746298e586c88edf3a7c340c8d19a0c13df77 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:39:31 -0500 Subject: amd-xgbe: Advertise FEC support with the KR re-driver When a KR re-driver is present, indicate the FEC support is available during auto-negotiation. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 141bb13dda26..dd747f6c519b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -1720,6 +1720,10 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, XGBE_CLR_ADV(dlks, 1000baseKX_Full); XGBE_CLR_ADV(dlks, 10000baseKR_Full); + /* Advertise FEC support is present */ + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + XGBE_SET_ADV(dlks, 10000baseR_FEC); + switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: XGBE_SET_ADV(dlks, 10000baseKR_Full); -- cgit v1.2.3 From e722ec82374b7a0c0cfa954e4f780221139c5f93 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:39:39 -0500 Subject: amd-xgbe: Update the BelFuse quirk to support SGMII Instead of using a quirk to make the BelFuse 1GBT-SFP06 part look like a 1000baseX part, program the SFP PHY to support SGMII and 10/100/1000 baseT. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 109 +++++++++++++++++++--------- 1 file changed, 75 insertions(+), 34 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index dd747f6c519b..194a5696a82c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -860,6 +860,9 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int phy_id = phy_data->phydev->phy_id; + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) + return false; + if ((phy_id & 0xfffffff0) != 0x01ff0cc0) return false; @@ -885,8 +888,80 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) return true; } +static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + unsigned int phy_id = phy_data->phydev->phy_id; + int reg; + + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) + return false; + + if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], + XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN)) + return false; + + if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], + XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) + return false; + + if ((phy_id & 0xfffffff0) != 0x03625d10) + return false; + + /* Disable RGMII mode */ + phy_write(phy_data->phydev, 0x18, 0x7007); + reg = phy_read(phy_data->phydev, 0x18); + phy_write(phy_data->phydev, 0x18, reg & ~0x0080); + + /* Enable fiber register bank */ + phy_write(phy_data->phydev, 0x1c, 0x7c00); + reg = phy_read(phy_data->phydev, 0x1c); + reg &= 0x03ff; + reg &= ~0x0001; + phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0001); + + /* Power down SerDes */ + reg = phy_read(phy_data->phydev, 0x00); + phy_write(phy_data->phydev, 0x00, reg | 0x00800); + + /* Configure SGMII-to-Copper mode */ + phy_write(phy_data->phydev, 0x1c, 0x7c00); + reg = phy_read(phy_data->phydev, 0x1c); + reg &= 0x03ff; + reg &= ~0x0006; + phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0004); + + /* Power up SerDes */ + reg = phy_read(phy_data->phydev, 0x00); + phy_write(phy_data->phydev, 0x00, reg & ~0x00800); + + /* Enable copper register bank */ + phy_write(phy_data->phydev, 0x1c, 0x7c00); + reg = phy_read(phy_data->phydev, 0x1c); + reg &= 0x03ff; + reg &= ~0x0001; + phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg); + + /* Power up SerDes */ + reg = phy_read(phy_data->phydev, 0x00); + phy_write(phy_data->phydev, 0x00, reg & ~0x00800); + + phy_data->phydev->supported = PHY_GBIT_FEATURES; + phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + phy_data->phydev->advertising = phy_data->phydev->supported; + + netif_dbg(pdata, drv, pdata->netdev, + "BelFuse PHY quirk in place\n"); + + return true; +} + static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata) { + if (xgbe_phy_belfuse_phy_quirks(pdata)) + return; + if (xgbe_phy_finisar_phy_quirks(pdata)) return; } @@ -1027,37 +1102,6 @@ static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) return false; } -static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) -{ - struct xgbe_phy_data *phy_data = pdata->phy_data; - struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; - - if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], - XGBE_BEL_FUSE_VENDOR, 
XGBE_SFP_BASE_VENDOR_NAME_LEN)) - return false; - - if (!memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], - XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) { - phy_data->sfp_base = XGBE_SFP_BASE_1000_SX; - phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE; - phy_data->sfp_speed = XGBE_SFP_SPEED_1000; - if (phy_data->sfp_changed) - netif_dbg(pdata, drv, pdata->netdev, - "Bel-Fuse SFP quirk in place\n"); - return true; - } - - return false; -} - -static bool xgbe_phy_sfp_parse_quirks(struct xgbe_prv_data *pdata) -{ - if (xgbe_phy_belfuse_parse_quirks(pdata)) - return true; - - return false; -} - static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -1076,9 +1120,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); - if (xgbe_phy_sfp_parse_quirks(pdata)) - return; - /* Assume ACTIVE cable unless told it is PASSIVE */ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) { phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE; -- cgit v1.2.3 From 76cce0af85a0d5d8abef8b60eece5798ea7eea5a Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 May 2018 11:39:47 -0500 Subject: amd-xgbe: Improve SFP 100Mbps auto-negotiation After changing speed to 100Mbps as a result of auto-negotiation (AN), some 10/100/1000Mbps SFPs indicate a successful link (no faults or loss of signal), but cannot successfully transmit or receive data. These SFPs required an extra auto-negotiation (AN) after the speed change in order to operate properly. Add a quirk for these SFPs so that if the outcome of the AN actually results in changing to a new speed, re-initiate AN at that new speed. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 77 ++++++++++++++++------------- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 6 +++ drivers/net/ethernet/amd/xgbe/xgbe.h | 1 + 3 files changed, 50 insertions(+), 34 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 450b89cd9eeb..4b5d625de8f0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -331,13 +331,15 @@ static void xgbe_switch_mode(struct xgbe_prv_data *pdata) xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); } -static void xgbe_set_mode(struct xgbe_prv_data *pdata, +static bool xgbe_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { if (mode == xgbe_cur_mode(pdata)) - return; + return false; xgbe_change_mode(pdata, mode); + + return true; } static bool xgbe_use_mode(struct xgbe_prv_data *pdata, @@ -1178,21 +1180,23 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) return 0; } -static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) +static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode) { int ret; + mutex_lock(&pdata->an_mutex); + set_bit(XGBE_LINK_INIT, &pdata->dev_state); pdata->link_check = jiffies; ret = pdata->phy_if.phy_impl.an_config(pdata); if (ret) - return ret; + goto out; if (pdata->phy.autoneg != AUTONEG_ENABLE) { ret = xgbe_phy_config_fixed(pdata); if (ret || !pdata->kr_redrv) - return ret; + goto out; netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n"); } else { @@ -1202,24 +1206,27 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) /* Disable auto-negotiation interrupt */ disable_irq(pdata->an_irq); - /* Start auto-negotiation in a supported mode */ - if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { - xgbe_set_mode(pdata, XGBE_MODE_KR); - } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { - xgbe_set_mode(pdata, XGBE_MODE_KX_2500); - } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { - xgbe_set_mode(pdata, XGBE_MODE_KX_1000); - } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { - xgbe_set_mode(pdata, XGBE_MODE_SFI); - } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { - xgbe_set_mode(pdata, XGBE_MODE_X); - } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { - xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); - } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { - xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); - } else { - enable_irq(pdata->an_irq); - return -EINVAL; + if (set_mode) { + /* Start auto-negotiation in a supported mode */ + if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { + xgbe_set_mode(pdata, XGBE_MODE_KR); + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { + xgbe_set_mode(pdata, XGBE_MODE_KX_2500); + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { + xgbe_set_mode(pdata, XGBE_MODE_KX_1000); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { + xgbe_set_mode(pdata, XGBE_MODE_SFI); + } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { + xgbe_set_mode(pdata, XGBE_MODE_X); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { + xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { + xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); + } else { + enable_irq(pdata->an_irq); + ret = -EINVAL; + goto out; + } } /* Disable and stop any in progress auto-negotiation */ @@ -1239,16 +1246,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) xgbe_an_init(pdata); xgbe_an_restart(pdata); - return 0; -} - -static int 
xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) -{ - int ret; - - mutex_lock(&pdata->an_mutex); - - ret = __xgbe_phy_config_aneg(pdata); +out: if (ret) set_bit(XGBE_LINK_ERR, &pdata->dev_state); else @@ -1259,6 +1257,16 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) return ret; } +static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) +{ + return __xgbe_phy_config_aneg(pdata, true); +} + +static int xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata) +{ + return __xgbe_phy_config_aneg(pdata, false); +} + static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) { return (pdata->an_result == XGBE_AN_COMPLETE); @@ -1315,7 +1323,8 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_FULL; - xgbe_set_mode(pdata, mode); + if (xgbe_set_mode(pdata, mode) && pdata->an_again) + xgbe_phy_reconfig_aneg(pdata); } static void xgbe_phy_status(struct xgbe_prv_data *pdata) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 194a5696a82c..3ceb4f95ca7c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -902,6 +902,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN)) return false; + /* For Bel-Fuse, use the extra AN flag */ + pdata->an_again = 1; + if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) return false; @@ -978,6 +981,9 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) if (phy_data->phydev) return 0; + /* Clear the extra AN flag */ + pdata->an_again = 0; + /* Check for the use of an external PHY */ if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE) return 0; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 7a412cfa8e40..47bcbcf58048 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -1261,6 +1261,7 @@ struct xgbe_prv_data { enum xgbe_rx kr_state; enum xgbe_rx kx_state; struct work_struct an_work; + unsigned int an_again; unsigned int an_supported; unsigned int parallel_detect; unsigned int fec_ability; -- cgit v1.2.3 From 2c81bfd5ae5659df44b38ec71c404b4b261a9515 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Thu, 22 Feb 2018 13:22:56 -0600 Subject: net/mlx5e: Move port speed code from en_ethtool.c to en/port.c Move four below functions from en_ethtool.c to en/port.c. These functions are used by both en_ethtool.c and en_main.c. Future code can use these functions without ethtool link mode dependency. u32 mlx5e_port_ptys2speed(u32 eth_proto_oper); int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); u32 mlx5e_port_speed2linkmodes(u32 speed); Delete the speed field from table mlx5e_build_ptys2ethtool_map. This table only keeps the mapping between the mlx5e link mode and ethtool link mode. Add new table mlx5e_link_speed for translation from mlx5e link mode to actual speed. 
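For illustration only, a future caller of the relocated helpers could look roughly like the sketch below; the wrapper function name is hypothetical and not part of this patch, and only the call pattern against the new en/port.h declarations is shown.

/* Illustrative sketch, not part of the patch: exercise the relocated
 * helpers from en/port.h without any ethtool link-mode dependency.
 * mlx5e_example_report_speed() is a hypothetical caller.
 */
#include "en/port.h"

static void mlx5e_example_report_speed(struct mlx5_core_dev *mdev)
{
        u32 speed = 0, max_speed = 0, link_modes;

        /* operational speed in Mbps, derived from PTYS eth_proto_oper */
        if (!mlx5e_port_linkspeed(mdev, &speed))
                mlx5_core_dbg(mdev, "current link speed: %u Mbps\n", speed);

        /* highest speed present in the port protocol capabilities */
        if (!mlx5e_port_max_linkspeed(mdev, &max_speed))
                mlx5_core_dbg(mdev, "max link speed: %u Mbps\n", max_speed);

        /* map a fixed speed back to its PTYS link-mode bitmask */
        link_modes = mlx5e_port_speed2linkmodes(25000);
        mlx5_core_dbg(mdev, "25G link-mode mask: 0x%x\n", link_modes);
}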
Signed-off-by: Huy Nguyen Reviewed-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 - .../net/ethernet/mellanox/mlx5/core/en/Makefile | 1 + drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 129 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en/port.h | 43 +++++++ .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 102 ++++++---------- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3 +- 8 files changed, 213 insertions(+), 72 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/Makefile create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/port.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/port.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index a7135f5d5cf6..651cf3640420 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -15,7 +15,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ - en_arfs.o en_fs_ethtool.o en_selftest.o + en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index bc91a7335c93..d13a86a1d702 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -932,8 +932,6 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels); -int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile new file mode 100644 index 000000000000..d8e17110f25d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile @@ -0,0 +1 @@ +subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c new file mode 100644 index 000000000000..9f04542f3661 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "port.h" + +/* speed in units of 1Mb */ +static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = { + [MLX5E_1000BASE_CX_SGMII] = 1000, + [MLX5E_1000BASE_KX] = 1000, + [MLX5E_10GBASE_CX4] = 10000, + [MLX5E_10GBASE_KX4] = 10000, + [MLX5E_10GBASE_KR] = 10000, + [MLX5E_20GBASE_KR2] = 20000, + [MLX5E_40GBASE_CR4] = 40000, + [MLX5E_40GBASE_KR4] = 40000, + [MLX5E_56GBASE_R4] = 56000, + [MLX5E_10GBASE_CR] = 10000, + [MLX5E_10GBASE_SR] = 10000, + [MLX5E_10GBASE_ER] = 10000, + [MLX5E_40GBASE_SR4] = 40000, + [MLX5E_40GBASE_LR4] = 40000, + [MLX5E_50GBASE_SR2] = 50000, + [MLX5E_100GBASE_CR4] = 100000, + [MLX5E_100GBASE_SR4] = 100000, + [MLX5E_100GBASE_KR4] = 100000, + [MLX5E_100GBASE_LR4] = 100000, + [MLX5E_100BASE_TX] = 100, + [MLX5E_1000BASE_T] = 1000, + [MLX5E_10GBASE_T] = 10000, + [MLX5E_25GBASE_CR] = 25000, + [MLX5E_25GBASE_KR] = 25000, + [MLX5E_25GBASE_SR] = 25000, + [MLX5E_50GBASE_CR2] = 50000, + [MLX5E_50GBASE_KR2] = 50000, +}; + +u32 mlx5e_port_ptys2speed(u32 eth_proto_oper) +{ + unsigned long temp = eth_proto_oper; + u32 speed = 0; + int i; + + i = find_first_bit(&temp, MLX5E_LINK_MODES_NUMBER); + if (i < MLX5E_LINK_MODES_NUMBER) + speed = mlx5e_link_speed[i]; + + return speed; +} + +int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {}; + u32 eth_proto_oper; + int err; + + err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); + if (err) + return err; + + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + *speed = mlx5e_port_ptys2speed(eth_proto_oper); + if (!(*speed)) { + mlx5_core_warn(mdev, "cannot get port speed\n"); + err = -EINVAL; + } + + return err; +} + +int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) +{ + u32 max_speed = 0; + u32 proto_cap; + int err; + int i; + + err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN); + if (err) + return err; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) + if (proto_cap & MLX5E_PROT_MASK(i)) + max_speed = max(max_speed, mlx5e_link_speed[i]); + + *speed = max_speed; + return 0; +} + +u32 mlx5e_port_speed2linkmodes(u32 speed) +{ + u32 link_modes = 0; + int i; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (mlx5e_link_speed[i] == speed) + link_modes |= MLX5E_PROT_MASK(i); + } + + return link_modes; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h new file mode 100644 index 000000000000..7aae38e98a65 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5E_EN_PORT_H +#define __MLX5E_EN_PORT_H + +#include +#include "en.h" + +u32 mlx5e_port_ptys2speed(u32 eth_proto_oper); +int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); +int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); +u32 mlx5e_port_speed2linkmodes(u32 speed); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 2b786c4d3dab..42bd256e680d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -31,6 +31,7 @@ */ #include "en.h" +#include "en/port.h" void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo) @@ -59,18 +60,16 @@ static void mlx5e_get_drvinfo(struct net_device *dev, struct ptys2ethtool_config { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); - u32 speed; }; static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; -#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \ +#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, ...) 
\ ({ \ struct ptys2ethtool_config *cfg; \ const unsigned int modes[] = { __VA_ARGS__ }; \ unsigned int i; \ cfg = &ptys2ethtool_table[reg_]; \ - cfg->speed = speed_; \ bitmap_zero(cfg->supported, \ __ETHTOOL_LINK_MODE_MASK_NBITS); \ bitmap_zero(cfg->advertised, \ @@ -83,55 +82,55 @@ static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; void mlx5e_build_ptys2ethtool_map(void) { - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, 
ETHTOOL_LINK_MODE_10000baseT_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); } @@ -617,43 +616,24 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings } } -int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) -{ - u32 max_speed = 0; - u32 proto_cap; - int err; - int i; - - err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN); - if (err) - return err; - - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) - if (proto_cap & MLX5E_PROT_MASK(i)) - max_speed = max(max_speed, ptys2ethtool_table[i].speed); - - *speed = max_speed; - return 0; -} - static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, struct ethtool_link_ksettings *link_ksettings) { - int i; u32 speed = SPEED_UNKNOWN; u8 duplex = DUPLEX_UNKNOWN; if (!netif_carrier_ok(netdev)) goto out; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_oper & MLX5E_PROT_MASK(i)) { - speed = ptys2ethtool_table[i].speed; - duplex = DUPLEX_FULL; - break; - } + speed = mlx5e_port_ptys2speed(eth_proto_oper); + if (!speed) { + speed = SPEED_UNKNOWN; + goto out; } + + duplex = DUPLEX_FULL; + out: link_ksettings->base.speed = speed; link_ksettings->base.duplex = duplex; @@ -811,18 +791,6 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) return ptys_modes; } -static u32 mlx5e_ethtool2ptys_speed_link(u32 speed) -{ - u32 i, speed_links = 0; - - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (ptys2ethtool_table[i].speed == speed) - speed_links |= MLX5E_PROT_MASK(i); - } - - return speed_links; -} - static int mlx5e_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *link_ksettings) { @@ -842,7 +810,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev, link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? 
mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) : - mlx5e_ethtool2ptys_speed_link(speed); + mlx5e_port_speed2linkmodes(speed); err = mlx5_query_port_proto_cap(mdev, ð_proto_cap, MLX5_PTYS_EN); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b5a7580b12fe..cee44c21766c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -46,6 +46,7 @@ #include "accel/ipsec.h" #include "accel/tls.h" #include "vxlan.h" +#include "en/port.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -4082,7 +4083,7 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev) u32 link_speed = 0; u32 pci_bw = 0; - mlx5e_get_max_linkspeed(mdev, &link_speed); + mlx5e_port_max_linkspeed(mdev, &link_speed); pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL); mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n", link_speed, pci_bw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 674f1d7d2737..a9c96fe8e4fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -52,6 +52,7 @@ #include "eswitch.h" #include "vxlan.h" #include "fs_core.h" +#include "en/port.h" struct mlx5_nic_flow_attr { u32 action; @@ -613,7 +614,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, params.q_counter = priv->q_counter; /* set hairpin pair per each 50Gbs share of the link */ - mlx5e_get_max_linkspeed(priv->mdev, &link_speed); + mlx5e_port_max_linkspeed(priv->mdev, &link_speed); link_speed = max_t(u32, link_speed, 50000); link_speed64 = link_speed; do_div(link_speed64, 50000); -- cgit v1.2.3 From 50b4a3c23646254c7345f3663ff1e0a6cbcd9abb Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Fri, 2 Mar 2018 15:47:01 -0600 Subject: net/mlx5: PPTB and PBMC register firmware command support Add firmware command interface to read and write PPTB and PBMC registers. PPTB register enables mappings priority to a specific receive buffer. PBMC registers enables changing the receive buffer's configuration such as buffer size, xon/xoff thresholds, buffer's lossy property and buffer's shared property. 
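As a rough usage sketch of the new PPTB accessors, remapping a single priority to a different receive buffer could look like the snippet below; the helper name is hypothetical and not part of this patch, and only the query/modify/set pattern is illustrated.

/* Illustrative sketch, not part of the patch: remap priority 3 to
 * receive buffer 1 via the new PPTB helpers. buffer[] carries one
 * buffer index per priority, mirroring the 4-bit prio_x_buff fields.
 */
static int mlx5e_example_remap_prio3(struct mlx5_core_dev *mdev)
{
        u8 buffer[8];   /* one entry per priority 0..7 */
        int err;

        err = mlx5e_port_query_priority2buffer(mdev, buffer);
        if (err)
                return err;

        buffer[3] = 1;  /* priority 3 now drains into buffer 1 */

        return mlx5e_port_set_priority2buffer(mdev, buffer);
}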
Signed-off-by: Huy Nguyen Reviewed-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 108 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en/port.h | 5 + include/linux/mlx5/driver.h | 2 + include/linux/mlx5/mlx5_ifc.h | 35 +++++++ 4 files changed, 150 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 9f04542f3661..24e3b564964f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -127,3 +127,111 @@ u32 mlx5e_port_speed2linkmodes(u32 speed) return link_modes; } + +int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out) +{ + int sz = MLX5_ST_SZ_BYTES(pbmc_reg); + void *in; + int err; + + in = kzalloc(sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(pbmc_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PBMC, 0, 0); + + kfree(in); + return err; +} + +int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in) +{ + int sz = MLX5_ST_SZ_BYTES(pbmc_reg); + void *out; + int err; + + out = kzalloc(sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(pbmc_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PBMC, 0, 1); + + kfree(out); + return err; +} + +/* buffer[i]: buffer that priority i mapped to */ +int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer) +{ + int sz = MLX5_ST_SZ_BYTES(pptb_reg); + u32 prio_x_buff; + void *out; + void *in; + int prio; + int err; + + in = kzalloc(sz, GFP_KERNEL); + out = kzalloc(sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(pptb_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 0); + if (err) + goto out; + + prio_x_buff = MLX5_GET(pptb_reg, out, prio_x_buff); + for (prio = 0; prio < 8; prio++) { + buffer[prio] = (u8)(prio_x_buff >> (4 * prio)) & 0xF; + mlx5_core_dbg(mdev, "prio %d, buffer %d\n", prio, buffer[prio]); + } +out: + kfree(in); + kfree(out); + return err; +} + +int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer) +{ + int sz = MLX5_ST_SZ_BYTES(pptb_reg); + u32 prio_x_buff; + void *out; + void *in; + int prio; + int err; + + in = kzalloc(sz, GFP_KERNEL); + out = kzalloc(sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + /* First query the pptb register */ + MLX5_SET(pptb_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 0); + if (err) + goto out; + + memcpy(in, out, sz); + MLX5_SET(pptb_reg, in, local_port, 1); + + /* Update the pm and prio_x_buff */ + MLX5_SET(pptb_reg, in, pm, 0xFF); + + prio_x_buff = 0; + for (prio = 0; prio < 8; prio++) + prio_x_buff |= (buffer[prio] << (4 * prio)); + MLX5_SET(pptb_reg, in, prio_x_buff, prio_x_buff); + + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 1); + +out: + kfree(in); + kfree(out); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index 7aae38e98a65..f8cbd8194179 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -40,4 +40,9 @@ u32 mlx5e_port_ptys2speed(u32 eth_proto_oper); int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); 
u32 mlx5e_port_speed2linkmodes(u32 speed); + +int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); +int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); +int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); +int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); #endif diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index d703774982ca..92d292454351 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -124,6 +124,8 @@ enum { MLX5_REG_PAOS = 0x5006, MLX5_REG_PFCC = 0x5007, MLX5_REG_PPCNT = 0x5008, + MLX5_REG_PPTB = 0x500b, + MLX5_REG_PBMC = 0x500c, MLX5_REG_PMAOS = 0x5012, MLX5_REG_PUDE = 0x5009, MLX5_REG_PMPE = 0x5010, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f687989d336b..edbddeaacc88 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -8788,6 +8788,41 @@ struct mlx5_ifc_qpts_reg_bits { u8 trust_state[0x3]; }; +struct mlx5_ifc_pptb_reg_bits { + u8 reserved_at_0[0x2]; + u8 mm[0x2]; + u8 reserved_at_4[0x4]; + u8 local_port[0x8]; + u8 reserved_at_10[0x6]; + u8 cm[0x1]; + u8 um[0x1]; + u8 pm[0x8]; + + u8 prio_x_buff[0x20]; + + u8 pm_msb[0x8]; + u8 reserved_at_48[0x10]; + u8 ctrl_buff[0x4]; + u8 untagged_buff[0x4]; +}; + +struct mlx5_ifc_pbmc_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 xoff_timer_value[0x10]; + u8 xoff_refresh[0x10]; + + u8 reserved_at_40[0x9]; + u8 fullness_threshold[0x7]; + u8 port_buffer_size[0x10]; + + struct mlx5_ifc_bufferx_reg_bits buffer[10]; + + u8 reserved_at_2e0[0x40]; +}; + struct mlx5_ifc_qtct_reg_bits { u8 reserved_at_0[0x8]; u8 port_number[0x8]; -- cgit v1.2.3 From 0696d60853d58f1b9431bc689e4e4526845b790a Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Wed, 21 Mar 2018 21:10:22 -0500 Subject: net/mlx5e: Receive buffer configuration Add APIs for buffer configuration based on the changes in pfc configuration, cable len, buffer size configuration, and priority to buffer mapping. 
Note that the xoff fomula is as below xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B] xoff_threshold = buffer_size - xoff xon_threshold = xoff_threshold - MTU Signed-off-by: Huy Nguyen Reviewed-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5 + .../ethernet/mellanox/mlx5/core/en/port_buffer.c | 327 +++++++++++++++++++++ .../ethernet/mellanox/mlx5/core/en/port_buffer.h | 75 +++++ 4 files changed, 408 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 651cf3640420..9efbf193ad5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -21,7 +21,7 @@ mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o -mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o +mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d13a86a1d702..9ab7158a7ce7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -65,6 +65,7 @@ struct page_pool; #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) +#define MLX5E_MAX_PRIORITY 8 #define MLX5E_MAX_DSCP 64 #define MLX5E_MAX_NUM_TC 8 @@ -275,6 +276,10 @@ struct mlx5e_dcbx { /* The only setting that cannot be read from FW */ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; u8 cap; + + /* Buffer configuration */ + u32 cable_len; + u32 xoff; }; struct mlx5e_dcbx_dp { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c new file mode 100644 index 000000000000..c047da8752da --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "port_buffer.h" + +int mlx5e_port_query_buffer(struct mlx5e_priv *priv, + struct mlx5e_port_buffer *port_buffer) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(pbmc_reg); + u32 total_used = 0; + void *buffer; + void *out; + int err; + int i; + + out = kzalloc(sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5e_port_query_pbmc(mdev, out); + if (err) + goto out; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]); + port_buffer->buffer[i].lossy = + MLX5_GET(bufferx_reg, buffer, lossy); + port_buffer->buffer[i].epsb = + MLX5_GET(bufferx_reg, buffer, epsb); + port_buffer->buffer[i].size = + MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT; + port_buffer->buffer[i].xon = + MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT; + port_buffer->buffer[i].xoff = + MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT; + total_used += port_buffer->buffer[i].size; + + mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i, + port_buffer->buffer[i].size, + port_buffer->buffer[i].xon, + port_buffer->buffer[i].xoff, + port_buffer->buffer[i].epsb, + port_buffer->buffer[i].lossy); + } + + port_buffer->port_buffer_size = + MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT; + port_buffer->spare_buffer_size = + port_buffer->port_buffer_size - total_used; + + mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n", + port_buffer->port_buffer_size, + port_buffer->spare_buffer_size); +out: + kfree(out); + return err; +} + +static int port_set_buffer(struct mlx5e_priv *priv, + struct mlx5e_port_buffer *port_buffer) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(pbmc_reg); + void *buffer; + void *in; + int err; + int i; + + in = kzalloc(sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + err = mlx5e_port_query_pbmc(mdev, in); + if (err) + goto out; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); + + MLX5_SET(bufferx_reg, buffer, size, + port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT); + MLX5_SET(bufferx_reg, buffer, lossy, + port_buffer->buffer[i].lossy); + MLX5_SET(bufferx_reg, buffer, xoff_threshold, + port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT); + MLX5_SET(bufferx_reg, buffer, xon_threshold, + port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT); + } + + err = mlx5e_port_set_pbmc(mdev, in); +out: + kfree(in); + return err; +} + +/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ +static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) +{ + u32 speed; + u32 xoff; + int err; + + err = mlx5e_port_linkspeed(priv->mdev, &speed); + if (err) + return 0; + + xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; + + mlx5e_dbg(HW, priv, "%s: xoff=%d\n", __func__, xoff); + return xoff; +} + +static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, + u32 xoff, unsigned int mtu) +{ + int i; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + if (port_buffer->buffer[i].lossy) { + port_buffer->buffer[i].xoff = 0; + port_buffer->buffer[i].xon = 0; + continue; + } + + if (port_buffer->buffer[i].size 
< + (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) + return -ENOMEM; + + port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; + port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; + } + + return 0; +} + +/** + * update_buffer_lossy() + * mtu: device's MTU + * pfc_en: current pfc configuration + * buffer: current prio to buffer mapping + * xoff: xoff value + * port_buffer: port receive buffer configuration + * change: + * + * Update buffer configuration based on pfc configuraiton and priority + * to buffer mapping. + * Buffer's lossy bit is changed to: + * lossless if there is at least one PFC enabled priority mapped to this buffer + * lossy if all priorities mapped to this buffer are PFC disabled + * + * Return: + * Return 0 if no error. + * Set change to true if buffer configuration is modified. + */ +static int update_buffer_lossy(unsigned int mtu, + u8 pfc_en, u8 *buffer, u32 xoff, + struct mlx5e_port_buffer *port_buffer, + bool *change) +{ + bool changed = false; + u8 lossy_count; + u8 prio_count; + u8 lossy; + int prio; + int err; + int i; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + prio_count = 0; + lossy_count = 0; + + for (prio = 0; prio < MLX5E_MAX_PRIORITY; prio++) { + if (buffer[prio] != i) + continue; + + prio_count++; + lossy_count += !(pfc_en & (1 << prio)); + } + + if (lossy_count == prio_count) + lossy = 1; + else /* lossy_count < prio_count */ + lossy = 0; + + if (lossy != port_buffer->buffer[i].lossy) { + port_buffer->buffer[i].lossy = lossy; + changed = true; + } + } + + if (changed) { + err = update_xoff_threshold(port_buffer, xoff, mtu); + if (err) + return err; + + *change = true; + } + + return 0; +} + +int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, + u32 change, unsigned int mtu, + struct ieee_pfc *pfc, + u32 *buffer_size, + u8 *prio2buffer) +{ + struct mlx5e_port_buffer port_buffer; + u32 xoff = calculate_xoff(priv, mtu); + bool update_prio2buffer = false; + u8 buffer[MLX5E_MAX_PRIORITY]; + bool update_buffer = false; + u32 total_used = 0; + u8 curr_pfc_en; + int err; + int i; + + mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); + + err = mlx5e_port_query_buffer(priv, &port_buffer); + if (err) + return err; + + if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { + update_buffer = true; + err = update_xoff_threshold(&port_buffer, xoff, mtu); + if (err) + return err; + } + + if (change & MLX5E_PORT_BUFFER_PFC) { + err = mlx5e_port_query_priority2buffer(priv->mdev, buffer); + if (err) + return err; + + err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, + &port_buffer, &update_buffer); + if (err) + return err; + } + + if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { + update_prio2buffer = true; + err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL); + if (err) + return err; + + err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, + &port_buffer, &update_buffer); + if (err) + return err; + } + + if (change & MLX5E_PORT_BUFFER_SIZE) { + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]); + if (!port_buffer.buffer[i].lossy && !buffer_size[i]) { + mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n", + __func__, i); + return -EINVAL; + } + + port_buffer.buffer[i].size = buffer_size[i]; + total_used += buffer_size[i]; + } + + mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used); + + if (total_used > port_buffer.port_buffer_size) + return -EINVAL; + + update_buffer = true; + err = 
update_xoff_threshold(&port_buffer, xoff, mtu); + if (err) + return err; + } + + /* Need to update buffer configuration if xoff value is changed */ + if (!update_buffer && xoff != priv->dcbx.xoff) { + update_buffer = true; + err = update_xoff_threshold(&port_buffer, xoff, mtu); + if (err) + return err; + } + priv->dcbx.xoff = xoff; + + /* Apply the settings */ + if (update_buffer) { + err = port_set_buffer(priv, &port_buffer); + if (err) + return err; + } + + if (update_prio2buffer) + err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h new file mode 100644 index 000000000000..34f55b81a0de --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __MLX5_EN_PORT_BUFFER_H__ +#define __MLX5_EN_PORT_BUFFER_H__ + +#include "en.h" +#include "port.h" + +#define MLX5E_MAX_BUFFER 8 +#define MLX5E_BUFFER_CELL_SHIFT 7 +#define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */ + +#define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \ + MLX5_CAP_PCAM_REG(mdev, pbmc) && \ + MLX5_CAP_PCAM_REG(mdev, pptb)) + +enum { + MLX5E_PORT_BUFFER_CABLE_LEN = BIT(0), + MLX5E_PORT_BUFFER_PFC = BIT(1), + MLX5E_PORT_BUFFER_PRIO2BUFFER = BIT(2), + MLX5E_PORT_BUFFER_SIZE = BIT(3), +}; + +struct mlx5e_bufferx_reg { + u8 lossy; + u8 epsb; + u32 size; + u32 xoff; + u32 xon; +}; + +struct mlx5e_port_buffer { + u32 port_buffer_size; + u32 spare_buffer_size; + struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER]; +}; + +int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, + u32 change, unsigned int mtu, + struct ieee_pfc *pfc, + u32 *buffer_size, + u8 *prio2buffer); + +int mlx5e_port_query_buffer(struct mlx5e_priv *priv, + struct mlx5e_port_buffer *port_buffer); +#endif -- cgit v1.2.3 From ecdf2dadee8e8c5015771b802a9851ff332d3fc4 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Wed, 21 Mar 2018 21:39:17 -0500 Subject: net/mlx5e: Receive buffer support for DCBX Add dcbnl's set/get buffer configuration callback that allows user to set/get buffer size configuration and priority to buffer mapping. By default, firmware controls receive buffer configuration and priority of buffer mapping based on the changes in pfc settings. When set buffer call back is triggered, the buffer configuration changes to manual mode. The manual mode means mlx5 driver will adjust the buffer configuration accordingly based on the changes in pfc settings. ConnectX buffer stride is 128 Bytes. If the buffer size is not multiple of 128, the buffer size will be rounded down to the nearest multiple of 128. 
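A minimal sketch of that rounding, assuming only the MLX5E_BUFFER_CELL_SHIFT definition added below (the helper name itself is illustrative and not part of this patch): with a 128-byte cell (1 << MLX5E_BUFFER_CELL_SHIFT), a requested size of 10000 bytes is programmed as 78 cells, i.e. 9984 bytes.

/* Illustrative sketch, not part of the patch: a requested buffer size is
 * programmed as whole 128-byte cells, so any remainder below one cell is
 * dropped (10000 bytes -> 78 cells -> 9984 bytes).
 */
static u32 mlx5e_example_round_buffer_size(u32 requested)
{
        return (requested >> MLX5E_BUFFER_CELL_SHIFT) << MLX5E_BUFFER_CELL_SHIFT;
}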
Signed-off-by: Huy Nguyen Reviewed-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 132 +++++++++++++++++++-- 2 files changed, 126 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 9ab7158a7ce7..c5c7a6d687ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -278,6 +278,7 @@ struct mlx5e_dcbx { u8 cap; /* Buffer configuration */ + bool manual_buffer; u32 cable_len; u32 xoff; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index c641d5656b2d..0a52f31fef37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -32,8 +32,8 @@ #include #include #include "en.h" - -#define MLX5E_MAX_PRIORITY 8 +#include "en/port.h" +#include "en/port_buffer.h" #define MLX5E_100MB (100000) #define MLX5E_1GB (1000000) @@ -41,6 +41,9 @@ #define MLX5E_CEE_STATE_UP 1 #define MLX5E_CEE_STATE_DOWN 0 +/* Max supported cable length is 1000 meters */ +#define MLX5E_MAX_CABLE_LENGTH 1000 + enum { MLX5E_VENDOR_TC_GROUP_NUM = 7, MLX5E_LOWEST_PRIO_GROUP = 0, @@ -338,6 +341,9 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev, pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause); } + if (MLX5_BUFFER_SUPPORTED(mdev)) + pfc->delay = priv->dcbx.cable_len; + return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL); } @@ -346,16 +352,39 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; + u32 old_cable_len = priv->dcbx.cable_len; + struct ieee_pfc pfc_new; + u32 changed = 0; u8 curr_pfc_en; - int ret; + int ret = 0; + /* pfc_en */ mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL); + if (pfc->pfc_en != curr_pfc_en) { + ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); + if (ret) + return ret; + mlx5_toggle_port_link(mdev); + changed |= MLX5E_PORT_BUFFER_PFC; + } - if (pfc->pfc_en == curr_pfc_en) - return 0; + if (pfc->delay && + pfc->delay < MLX5E_MAX_CABLE_LENGTH && + pfc->delay != priv->dcbx.cable_len) { + priv->dcbx.cable_len = pfc->delay; + changed |= MLX5E_PORT_BUFFER_CABLE_LEN; + } - ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); - mlx5_toggle_port_link(mdev); + if (MLX5_BUFFER_SUPPORTED(mdev)) { + pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? 
pfc->pfc_en : curr_pfc_en; + if (priv->dcbx.manual_buffer) + ret = mlx5e_port_manual_buffer_config(priv, changed, + dev->mtu, &pfc_new, + NULL, NULL); + + if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN)) + priv->dcbx.cable_len = old_cable_len; + } if (!ret) { mlx5e_dbg(HW, priv, @@ -873,6 +902,90 @@ static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state) cee_cfg->pfc_enable = state; } +static int mlx5e_dcbnl_getbuffer(struct net_device *dev, + struct dcbnl_buffer *dcb_buffer) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_port_buffer port_buffer; + u8 buffer[MLX5E_MAX_PRIORITY]; + int i, err; + + if (!MLX5_BUFFER_SUPPORTED(mdev)) + return -EOPNOTSUPP; + + err = mlx5e_port_query_priority2buffer(mdev, buffer); + if (err) + return err; + + for (i = 0; i < MLX5E_MAX_PRIORITY; i++) + dcb_buffer->prio2buffer[i] = buffer[i]; + + err = mlx5e_port_query_buffer(priv, &port_buffer); + if (err) + return err; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) + dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size; + dcb_buffer->total_size = port_buffer.port_buffer_size; + + return 0; +} + +static int mlx5e_dcbnl_setbuffer(struct net_device *dev, + struct dcbnl_buffer *dcb_buffer) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_port_buffer port_buffer; + u8 old_prio2buffer[MLX5E_MAX_PRIORITY]; + u32 *buffer_size = NULL; + u8 *prio2buffer = NULL; + u32 changed = 0; + int i, err; + + if (!MLX5_BUFFER_SUPPORTED(mdev)) + return -EOPNOTSUPP; + + for (i = 0; i < DCBX_MAX_BUFFERS; i++) + mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]); + + for (i = 0; i < MLX5E_MAX_PRIORITY; i++) + mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]); + + err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer); + if (err) + return err; + + for (i = 0; i < MLX5E_MAX_PRIORITY; i++) { + if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) { + changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER; + prio2buffer = dcb_buffer->prio2buffer; + break; + } + } + + err = mlx5e_port_query_buffer(priv, &port_buffer); + if (err) + return err; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) { + changed |= MLX5E_PORT_BUFFER_SIZE; + buffer_size = dcb_buffer->buffer_size; + break; + } + } + + if (!changed) + return 0; + + priv->dcbx.manual_buffer = true; + err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL, + buffer_size, prio2buffer); + return err; +} + const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_getets = mlx5e_dcbnl_ieee_getets, .ieee_setets = mlx5e_dcbnl_ieee_setets, @@ -884,6 +997,8 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_delapp = mlx5e_dcbnl_ieee_delapp, .getdcbx = mlx5e_dcbnl_getdcbx, .setdcbx = mlx5e_dcbnl_setdcbx, + .dcbnl_getbuffer = mlx5e_dcbnl_getbuffer, + .dcbnl_setbuffer = mlx5e_dcbnl_setbuffer, /* CEE interfaces */ .setall = mlx5e_dcbnl_setall, @@ -1091,5 +1206,8 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) priv->dcbx.cap |= DCB_CAP_DCBX_HOST; + priv->dcbx.manual_buffer = false; + priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN; + mlx5e_ets_init(priv); } -- cgit v1.2.3 From 735fc4054b3a25034445c6713d259da0f96f8131 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 24 May 2018 16:46:12 +0200 Subject: xdp: change ndo_xdp_xmit API to support bulking This patch change the API for 
ndo_xdp_xmit to support bulking xdp_frames. When kernel is compiled with CONFIG_RETPOLINE, XDP sees a huge slowdown. Most of the slowdown is caused by DMA API indirect function calls, but also the net_device->ndo_xdp_xmit() call. Benchmarked patch with CONFIG_RETPOLINE, using xdp_redirect_map with single flow/core test (CPU E5-1650 v4 @ 3.60GHz), showed performance improved: for driver ixgbe: 6,042,682 pps -> 6,853,768 pps = +811,086 pps for driver i40e : 6,187,169 pps -> 6,724,519 pps = +537,350 pps With frames avail as a bulk inside the driver ndo_xdp_xmit call, further optimizations are possible, like bulk DMA-mapping for TX. Testing without CONFIG_RETPOLINE show the same performance for physical NIC drivers. The virtual NIC driver tun sees a huge performance boost, as it can avoid doing per frame producer locking, but instead amortize the locking cost over the bulk. V2: Fix compile errors reported by kbuild test robot V4: Isolated ndo, driver changes and callers. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 26 ++++++++--- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 21 ++++++--- drivers/net/tun.c | 37 +++++++++------ drivers/net/virtio_net.c | 66 ++++++++++++++++++++------- include/linux/netdevice.h | 14 ++++-- kernel/bpf/devmap.c | 29 +++++++----- net/core/filter.c | 8 ++-- 8 files changed, 139 insertions(+), 64 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5efa68de935b..9b698c5acd05 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3664,14 +3664,19 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * @dev: netdev * @xdp: XDP buffer * - * Returns Zero if sent, else an error code + * Returns number of frames successfully sent. Frames that fail are + * free'ed via XDP return API. + * + * For error cases, a negative errno code is returned and no-frames + * are transmitted (caller must handle freeing frames). 
**/ -int i40e_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) +int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames) { struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); struct i40e_vsi *vsi = np->vsi; - int err; + int drops = 0; + int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; @@ -3679,11 +3684,18 @@ int i40e_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) return -ENXIO; - err = i40e_xmit_xdp_ring(xdpf, vsi->xdp_rings[queue_index]); - if (err != I40E_XDP_TX) - return -ENOSPC; + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; - return 0; + err = i40e_xmit_xdp_ring(xdpf, vsi->xdp_rings[queue_index]); + if (err != I40E_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + return n - drops; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index fdd2c55f03a6..eb8804b3d7b6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -487,7 +487,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); void i40e_detect_recover_hung(struct i40e_vsi *vsi); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); -int i40e_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf); +int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames); void i40e_xdp_flush(struct net_device *dev); /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6652b201df5b..9645619f7729 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10017,11 +10017,13 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } -static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) +static int ixgbe_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; - int err; + int drops = 0; + int i; if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) return -ENETDOWN; @@ -10033,11 +10035,18 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) if (unlikely(!ring)) return -ENXIO; - err = ixgbe_xmit_xdp_ring(adapter, xdpf); - if (err != IXGBE_XDP_TX) - return -ENOSPC; + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; - return 0; + err = ixgbe_xmit_xdp_ring(adapter, xdpf); + if (err != IXGBE_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + return n - drops; } static void ixgbe_xdp_flush(struct net_device *dev) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 44d4f3d25350..d3dcfcb1c4b3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -70,6 +70,7 @@ #include #include #include +#include #include #include #include @@ -1290,34 +1291,44 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, }; -static int tun_xdp_xmit(struct net_device *dev, struct xdp_frame *frame) +static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; u32 numqueues; - int ret = 0; + int drops = 0; + int cnt = n; + int i; rcu_read_lock(); numqueues = READ_ONCE(tun->numqueues); if (!numqueues) { - ret = -ENOSPC; - goto 
out; + rcu_read_unlock(); + return -ENXIO; /* Caller will free/return all frames */ } tfile = rcu_dereference(tun->tfiles[smp_processor_id() % numqueues]); - /* Encode the XDP flag into lowest bit for consumer to differ - * XDP buffer from sk_buff. - */ - if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(frame))) { - this_cpu_inc(tun->pcpu_stats->tx_dropped); - ret = -ENOSPC; + + spin_lock(&tfile->tx_ring.producer_lock); + for (i = 0; i < n; i++) { + struct xdp_frame *xdp = frames[i]; + /* Encode the XDP flag into lowest bit for consumer to differ + * XDP buffer from sk_buff. + */ + void *frame = tun_xdp_to_ptr(xdp); + + if (__ptr_ring_produce(&tfile->tx_ring, frame)) { + this_cpu_inc(tun->pcpu_stats->tx_dropped); + xdp_return_frame_rx_napi(xdp); + drops++; + } } + spin_unlock(&tfile->tx_ring.producer_lock); -out: rcu_read_unlock(); - return ret; + return cnt - drops; } static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) @@ -1327,7 +1338,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) if (unlikely(!frame)) return -EOVERFLOW; - return tun_xdp_xmit(dev, frame); + return tun_xdp_xmit(dev, 1, &frame); } static void tun_xdp_flush(struct net_device *dev) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f34794a76c4d..39a0783d1cde 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -419,23 +419,13 @@ static void virtnet_xdp_flush(struct net_device *dev) virtqueue_kick(sq->vq); } -static int __virtnet_xdp_xmit(struct virtnet_info *vi, - struct xdp_frame *xdpf) +static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, + struct send_queue *sq, + struct xdp_frame *xdpf) { struct virtio_net_hdr_mrg_rxbuf *hdr; - struct xdp_frame *xdpf_sent; - struct send_queue *sq; - unsigned int len; - unsigned int qp; int err; - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - sq = &vi->sq[qp]; - - /* Free up any pending old buffers before queueing new ones. */ - while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) - xdp_return_frame(xdpf_sent); - /* virtqueue want to use data area in-front of packet */ if (unlikely(xdpf->metasize > 0)) return -EOPNOTSUPP; @@ -459,11 +449,40 @@ static int __virtnet_xdp_xmit(struct virtnet_info *vi, return 0; } -static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) +static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi, + struct xdp_frame *xdpf) +{ + struct xdp_frame *xdpf_sent; + struct send_queue *sq; + unsigned int len; + unsigned int qp; + + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); + sq = &vi->sq[qp]; + + /* Free up any pending old buffers before queueing new ones. */ + while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) + xdp_return_frame(xdpf_sent); + + return __virtnet_xdp_xmit_one(vi, sq, xdpf); +} + +static int virtnet_xdp_xmit(struct net_device *dev, + int n, struct xdp_frame **frames) { struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; + struct xdp_frame *xdpf_sent; struct bpf_prog *xdp_prog; + struct send_queue *sq; + unsigned int len; + unsigned int qp; + int drops = 0; + int err; + int i; + + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); + sq = &vi->sq[qp]; /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. 
@@ -472,7 +491,20 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) if (!xdp_prog) return -ENXIO; - return __virtnet_xdp_xmit(vi, xdpf); + /* Free up any pending old buffers before queueing new ones. */ + while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) + xdp_return_frame(xdpf_sent); + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + + err = __virtnet_xdp_xmit_one(vi, sq, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + return n - drops; } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) @@ -616,7 +648,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; - err = __virtnet_xdp_xmit(vi, xdpf); + err = __virtnet_xdp_tx_xmit(vi, xdpf); if (unlikely(err)) { trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; @@ -779,7 +811,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; - err = __virtnet_xdp_xmit(vi, xdpf); + err = __virtnet_xdp_tx_xmit(vi, xdpf); if (unlikely(err)) { trace_xdp_exception(vi->dev, xdp_prog, act); if (unlikely(xdp_page != page)) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 03ed492c4e14..debdb6286170 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1185,9 +1185,13 @@ struct dev_ifalias { * This function is used to set or query state related to XDP on the * netdevice and manage BPF offload. See definition of * enum bpf_netdev_command for details. - * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_frame *xdp); - * This function is used to submit a XDP packet for transmit on a - * netdevice. + * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp); + * This function is used to submit @n XDP packets for transmit on a + * netdevice. Returns number of frames successfully transmitted, frames + * that got dropped are freed/returned via xdp_return_frame(). + * Returns negative number, means general error invoking ndo, meaning + * no frames were xmit'ed and core-caller will free all frames. + * TODO: Consider add flag to allow sending flush operation. * void (*ndo_xdp_flush)(struct net_device *dev); * This function is used to inform the driver to flush a particular * xdp tx queue. Must be called on same CPU as xdp_xmit. 
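The contract documented in the comment above reduces to a small per-driver loop, the same shape as the i40e and ixgbe conversions earlier in this series. A minimal sketch, assuming placeholder foo_* names and whatever XDP TX-ring lookup the driver already has (this is not any in-tree driver):

#include <linux/netdevice.h>
#include <net/xdp.h>

/* placeholders for the driver's existing ring lookup and enqueue helpers */
struct foo_ring;
static struct foo_ring *foo_get_xdp_ring(struct net_device *dev);
static int foo_xmit_one_frame(struct foo_ring *ring, struct xdp_frame *xdpf);

static int foo_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames)
{
	struct foo_ring *ring = foo_get_xdp_ring(dev);
	int drops = 0;
	int i;

	if (unlikely(!ring))
		return -ENXIO;	/* nothing consumed: the caller frees all @n frames */

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (foo_xmit_one_frame(ring, xdpf)) {
			/* a rejected frame now belongs to the driver, return it */
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	return n - drops;	/* frames actually queued for transmit */
}

The key point is that a negative return is only valid when no frame has been consumed; once the loop starts, the driver must report a count and free any rejected frames itself.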
@@ -1375,8 +1379,8 @@ struct net_device_ops { int needed_headroom); int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); - int (*ndo_xdp_xmit)(struct net_device *dev, - struct xdp_frame *xdp); + int (*ndo_xdp_xmit)(struct net_device *dev, int n, + struct xdp_frame **xdp); void (*ndo_xdp_flush)(struct net_device *dev); }; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index a9cd5c93dd2b..77908311ec98 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -232,24 +232,31 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj, prefetch(xdpf); } - for (i = 0; i < bq->count; i++) { - struct xdp_frame *xdpf = bq->q[i]; - int err; - - err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf); - if (err) { - drops++; - xdp_return_frame_rx_napi(xdpf); - } else { - sent++; - } + sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q); + if (sent < 0) { + sent = 0; + goto error; } + drops = bq->count - sent; +out: bq->count = 0; trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit, sent, drops, bq->dev_rx, dev); bq->dev_rx = NULL; return 0; +error: + /* If ndo_xdp_xmit fails with an errno, no frames have been + * xmit'ed and it's our responsibility to them free all. + */ + for (i = 0; i < bq->count; i++) { + struct xdp_frame *xdpf = bq->q[i]; + + /* RX path under NAPI protection, can return frames faster */ + xdp_return_frame_rx_napi(xdpf); + drops++; + } + goto out; } /* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled diff --git a/net/core/filter.c b/net/core/filter.c index 36cf2f87d742..1d75f9322275 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3039,7 +3039,7 @@ static int __bpf_tx_xdp(struct net_device *dev, u32 index) { struct xdp_frame *xdpf; - int err; + int sent; if (!dev->netdev_ops->ndo_xdp_xmit) { return -EOPNOTSUPP; @@ -3049,9 +3049,9 @@ static int __bpf_tx_xdp(struct net_device *dev, if (unlikely(!xdpf)) return -EOVERFLOW; - err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf); - if (err) - return err; + sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf); + if (sent <= 0) + return sent; dev->netdev_ops->ndo_xdp_flush(dev); return 0; } -- cgit v1.2.3 From d624613e422d9bda9e4f066281b1f178ed51f0b1 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 22 May 2018 15:07:18 +0800 Subject: cxgb4: Check for kvzalloc allocation failure t4_prep_fw doesn't check for card_fw pointer before store the read data, which could lead to a NULL pointer dereference if kvzalloc failed. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 513e1d356384..8405187f8e3f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4140,6 +4140,10 @@ static int adap_init0(struct adapter *adap) * card */ card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL); + if (!card_fw) { + ret = -ENOMEM; + goto bye; + } /* Get FW from from /lib/firmware/ */ ret = request_firmware(&fw, fw_info->fw_mod_name, -- cgit v1.2.3 From c3f22415474d36194cb7785d9a236523bce0d829 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 23 May 2018 13:37:55 -0500 Subject: ibmvnic: Mark NAPI flag as disabled when released Set adapter NAPI state as disabled if they are removed. This will allow them to be enabled again if reallocated in case of a hard reset. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4bb4646a5f92..eabc1e445137 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -789,6 +789,7 @@ static void release_napi(struct ibmvnic_adapter *adapter) kfree(adapter->napi); adapter->napi = NULL; adapter->num_active_rx_napi = 0; + adapter->napi_enabled = false; } static int ibmvnic_login(struct net_device *netdev) -- cgit v1.2.3 From 5153698e551b4b824e3d35da59914e1b4f815baa Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 23 May 2018 13:37:56 -0500 Subject: ibmvnic: Introduce active CRQ state Introduce an "active" state for a IBM vNIC Command-Response Queue. A CRQ is considered active once it has initialized or linked with its partner by sending an initialization request and getting a successful response back from the management partition. Until this has happened, do not allow CRQ commands to be sent other than the initialization request. This change will avoid a protocol error in case of a device transport event occurring during a initialization. When the driver receives a transport event notification indicating that the backing hardware has changed and needs reinitialization, any further commands other than the initialization handshake with the VIOS management partition will result in an invalid state error. Instead of sending a command that will be returned with an error, print a warning and return an error that will be handled by the caller. Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 10 ++++++++++ drivers/net/ethernet/ibm/ibmvnic.h | 1 + 2 files changed, 11 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index eabc1e445137..e6a081c13661 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3147,6 +3147,12 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, (unsigned long int)cpu_to_be64(u64_crq[0]), (unsigned long int)cpu_to_be64(u64_crq[1])); + if (!adapter->crq.active && + crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { + dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); + return -EINVAL; + } + /* Make sure the hypervisor sees the complete request */ mb(); @@ -4225,6 +4231,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, break; case IBMVNIC_CRQ_INIT_COMPLETE: dev_info(dev, "Partner initialization complete\n"); + adapter->crq.active = true; send_version_xchg(adapter); break; default: @@ -4233,6 +4240,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, return; case IBMVNIC_CRQ_XPORT_EVENT: netif_carrier_off(netdev); + adapter->crq.active = false; if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { dev_info(dev, "Migrated, re-enabling adapter\n"); ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); @@ -4420,6 +4428,7 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) /* Clean out the queue */ memset(crq->msgs, 0, PAGE_SIZE); crq->cur = 0; + crq->active = false; /* And re-open it again */ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, @@ -4454,6 +4463,7 @@ static void release_crq_queue(struct ibmvnic_adapter *adapter) DMA_BIDIRECTIONAL); free_page((unsigned long)crq->msgs); crq->msgs = NULL; + crq->active = false; } static int init_crq_queue(struct ibmvnic_adapter 
*adapter) diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 22391e8805f6..edfc312d3523 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -865,6 +865,7 @@ struct ibmvnic_crq_queue { int size, cur; dma_addr_t msg_token; spinlock_t lock; + bool active; }; union sub_crq { -- cgit v1.2.3 From 9c4eaabd1bb392e30c2d40d60e60d68315cf0c1f Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 23 May 2018 13:37:57 -0500 Subject: ibmvnic: Check CRQ command return codes Check whether CRQ command is successful before awaiting a response from the management partition. If the command was not successful, the driver may hang waiting for a response that will never come. Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 51 +++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index e6a081c13661..70835198a2f9 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -109,8 +109,8 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *, struct ibmvnic_sub_crq_queue *); static int ibmvnic_poll(struct napi_struct *napi, int data); static void send_map_query(struct ibmvnic_adapter *adapter); -static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); -static void send_request_unmap(struct ibmvnic_adapter *, u8); +static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); +static int send_request_unmap(struct ibmvnic_adapter *, u8); static int send_login(struct ibmvnic_adapter *adapter); static void send_cap_queries(struct ibmvnic_adapter *adapter); static int init_sub_crqs(struct ibmvnic_adapter *); @@ -172,6 +172,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { struct device *dev = &adapter->vdev->dev; + int rc; ltb->size = size; ltb->buff = dma_alloc_coherent(dev, ltb->size, <b->addr, @@ -185,8 +186,12 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, adapter->map_id++; init_completion(&adapter->fw_done); - send_request_map(adapter, ltb->addr, - ltb->size, ltb->map_id); + rc = send_request_map(adapter, ltb->addr, + ltb->size, ltb->map_id); + if (rc) { + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + return rc; + } wait_for_completion(&adapter->fw_done); if (adapter->fw_done_rc) { @@ -215,10 +220,14 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, static int reset_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb) { + int rc; + memset(ltb->buff, 0, ltb->size); init_completion(&adapter->fw_done); - send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); + rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); + if (rc) + return rc; wait_for_completion(&adapter->fw_done); if (adapter->fw_done_rc) { @@ -952,6 +961,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) struct device *dev = &adapter->vdev->dev; union ibmvnic_crq crq; int len = 0; + int rc; if (adapter->vpd->buff) len = adapter->vpd->len; @@ -959,7 +969,9 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) init_completion(&adapter->fw_done); crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; crq.get_vpd_size.cmd = GET_VPD_SIZE; - ibmvnic_send_crq(adapter, &crq); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) + 
return rc; wait_for_completion(&adapter->fw_done); if (!adapter->vpd->len) @@ -992,7 +1004,12 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) crq.get_vpd.cmd = GET_VPD; crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len); - ibmvnic_send_crq(adapter, &crq); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) { + kfree(adapter->vpd->buff); + adapter->vpd->buff = NULL; + return rc; + } wait_for_completion(&adapter->fw_done); return 0; @@ -1691,6 +1708,7 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; union ibmvnic_crq crq; + int rc; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -1701,7 +1719,9 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); init_completion(&adapter->fw_done); - ibmvnic_send_crq(adapter, &crq); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) + return rc; wait_for_completion(&adapter->fw_done); /* netdev->dev_addr is changed in handle_change_mac_rsp function */ return adapter->fw_done_rc ? -EIO : 0; @@ -2365,6 +2385,7 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, struct ibmvnic_adapter *adapter = netdev_priv(dev); union ibmvnic_crq crq; int i, j; + int rc; memset(&crq, 0, sizeof(crq)); crq.request_statistics.first = IBMVNIC_CRQ_CMD; @@ -2375,7 +2396,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, /* Wait for data to be written */ init_completion(&adapter->stats_done); - ibmvnic_send_crq(adapter, &crq); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) + return; wait_for_completion(&adapter->stats_done); for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) @@ -3377,8 +3400,8 @@ buf_alloc_failed: return -1; } -static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, - u32 len, u8 map_id) +static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, + u32 len, u8 map_id) { union ibmvnic_crq crq; @@ -3388,10 +3411,10 @@ static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, crq.request_map.map_id = map_id; crq.request_map.ioba = cpu_to_be32(addr); crq.request_map.len = cpu_to_be32(len); - ibmvnic_send_crq(adapter, &crq); + return ibmvnic_send_crq(adapter, &crq); } -static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) +static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) { union ibmvnic_crq crq; @@ -3399,7 +3422,7 @@ static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) crq.request_unmap.first = IBMVNIC_CRQ_CMD; crq.request_unmap.cmd = REQUEST_UNMAP; crq.request_unmap.map_id = map_id; - ibmvnic_send_crq(adapter, &crq); + return ibmvnic_send_crq(adapter, &crq); } static void send_map_query(struct ibmvnic_adapter *adapter) -- cgit v1.2.3 From 17c8705838a5acafbc77079c72378fc7e0f0a876 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 23 May 2018 13:37:58 -0500 Subject: ibmvnic: Return error code if init interrupted by transport event If device init is interrupted by a failover, set the init return code so that it can be checked and handled appropriately by the init routine. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 70835198a2f9..f1f744ebb14e 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4249,7 +4249,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, dev_info(dev, "Partner initialized\n"); adapter->from_passive_init = true; adapter->failover_pending = false; - complete(&adapter->init_done); + if (!completion_done(&adapter->init_done)) { + complete(&adapter->init_done); + adapter->init_done_rc = -EIO; + } ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); break; case IBMVNIC_CRQ_INIT_COMPLETE: -- cgit v1.2.3 From ab5ec33b9ac285d01a0e0fa94b0fdcb64262b928 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 23 May 2018 13:37:59 -0500 Subject: ibmvnic: Handle error case when setting link state If setting the link state is not successful, print a warning with the resulting return code and return it to be handled by the caller. Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index f1f744ebb14e..b1bbd5bcc129 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -929,6 +929,10 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) /* Partuial success, delay and re-send */ mdelay(1000); resend = true; + } else if (adapter->init_done_rc) { + netdev_warn(netdev, "Unable to set link state, rc=%d\n", + adapter->init_done_rc); + return adapter->init_done_rc; } } while (resend); -- cgit v1.2.3 From 8a348450a036d690904ca54a476cbb4939ca8f95 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 23 May 2018 13:38:00 -0500 Subject: ibmvnic: Create separate initialization routine for resets Instead of having one initialization routine for all cases, create a separate, simpler function for standard initialization, such as during device probe. Use the original initialization function to handle device reset scenarios. The goal of this patch is to avoid having a single, cluttered init function to handle all possible scenarios. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 48 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b1bbd5bcc129..f26e1f893441 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -116,6 +116,7 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter); static int init_sub_crqs(struct ibmvnic_adapter *); static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); static int ibmvnic_init(struct ibmvnic_adapter *); +static int ibmvnic_reset_init(struct ibmvnic_adapter *); static void release_crq_queue(struct ibmvnic_adapter *); static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p); static int init_crq_queue(struct ibmvnic_adapter *adapter); @@ -1807,7 +1808,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, return rc; } - rc = ibmvnic_init(adapter); + rc = ibmvnic_reset_init(adapter); if (rc) return IBMVNIC_INIT_FAILED; @@ -4571,7 +4572,7 @@ map_failed: return retrc; } -static int ibmvnic_init(struct ibmvnic_adapter *adapter) +static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; unsigned long timeout = msecs_to_jiffies(30000); @@ -4630,6 +4631,49 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) return rc; } +static int ibmvnic_init(struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + unsigned long timeout = msecs_to_jiffies(30000); + int rc; + + adapter->from_passive_init = false; + + init_completion(&adapter->init_done); + adapter->init_done_rc = 0; + ibmvnic_send_crq_init(adapter); + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { + dev_err(dev, "Initialization sequence timed out\n"); + return -1; + } + + if (adapter->init_done_rc) { + release_crq_queue(adapter); + return adapter->init_done_rc; + } + + if (adapter->from_passive_init) { + adapter->state = VNIC_OPEN; + adapter->from_passive_init = false; + return -1; + } + + rc = init_sub_crqs(adapter); + if (rc) { + dev_err(dev, "Initialization of sub crqs failed\n"); + release_crq_queue(adapter); + return rc; + } + + rc = init_sub_crq_irqs(adapter); + if (rc) { + dev_err(dev, "Failed to initialize sub crq irqs\n"); + release_crq_queue(adapter); + } + + return rc; +} + static struct device_attribute dev_attr_failover; static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) -- cgit v1.2.3 From 06e43d7f9fe536621091ca5b87dc6b8498898226 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 23 May 2018 13:38:01 -0500 Subject: ibmvnic: Set resetting state at earliest possible point Set device resetting state at the earliest possible point: as soon as a reset is successfully scheduled. The reset state is toggled off when all resets have been processed to completion. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index f26e1f893441..ee51deba7a12 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1919,7 +1919,6 @@ static void __ibmvnic_reset(struct work_struct *work) netdev = adapter->netdev; mutex_lock(&adapter->reset_lock); - adapter->resetting = true; reset_state = adapter->state; rwi = get_next_rwi(adapter); @@ -1994,7 +1993,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); mutex_unlock(&adapter->rwi_lock); - + adapter->resetting = true; netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); schedule_work(&adapter->ibmvnic_reset); -- cgit v1.2.3 From 2770a7984db588913e11a6dfcfe3461dbba9b7b2 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 23 May 2018 13:38:02 -0500 Subject: ibmvnic: Introduce hard reset recovery Introduce a recovery hard reset to handle reset failure as a result of change of device context following a transport event, such as a backing device failover or partition migration. These operations reset the device context to its initial state. If this occurs during a reset, any initialization commands are likely to fail with an invalid state error as backing device firmware requests reinitialization. When this happens, make one more attempt by performing a hard reset, which frees any resources currently allocated and performs device initialization. If a transport event occurs during a device reset, a flag is set which will trigger a new hard reset following the completionof the current reset event. Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 101 +++++++++++++++++++++++++++++++++++-- drivers/net/ethernet/ibm/ibmvnic.h | 1 + 2 files changed, 98 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ee51deba7a12..09f8e6baf049 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1878,6 +1878,85 @@ static int do_reset(struct ibmvnic_adapter *adapter, return 0; } +static int do_hard_reset(struct ibmvnic_adapter *adapter, + struct ibmvnic_rwi *rwi, u32 reset_state) +{ + struct net_device *netdev = adapter->netdev; + int rc; + + netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n", + rwi->reset_reason); + + netif_carrier_off(netdev); + adapter->reset_reason = rwi->reset_reason; + + ibmvnic_cleanup(netdev); + release_resources(adapter); + release_sub_crqs(adapter, 0); + release_crq_queue(adapter); + + /* remove the closed state so when we call open it appears + * we are coming from the probed state. + */ + adapter->state = VNIC_PROBED; + + rc = init_crq_queue(adapter); + if (rc) { + netdev_err(adapter->netdev, + "Couldn't initialize crq. rc=%d\n", rc); + return rc; + } + + rc = ibmvnic_init(adapter); + if (rc) + return rc; + + /* If the adapter was in PROBE state prior to the reset, + * exit here. 
+ */ + if (reset_state == VNIC_PROBED) + return 0; + + rc = ibmvnic_login(netdev); + if (rc) { + adapter->state = VNIC_PROBED; + return 0; + } + /* netif_set_real_num_xx_queues needs to take rtnl lock here + * unless wait_for_reset is set, in which case the rtnl lock + * has already been taken before initializing the reset + */ + if (!adapter->wait_for_reset) { + rtnl_lock(); + rc = init_resources(adapter); + rtnl_unlock(); + } else { + rc = init_resources(adapter); + } + if (rc) + return rc; + + ibmvnic_disable_irqs(adapter); + adapter->state = VNIC_CLOSED; + + if (reset_state == VNIC_CLOSED) + return 0; + + rc = __ibmvnic_open(netdev); + if (rc) { + if (list_empty(&adapter->rwi_list)) + adapter->state = VNIC_CLOSED; + else + adapter->state = reset_state; + + return 0; + } + + netif_carrier_on(netdev); + + return 0; +} + static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) { struct ibmvnic_rwi *rwi; @@ -1923,9 +2002,15 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); while (rwi) { - rc = do_reset(adapter, rwi, reset_state); + if (adapter->force_reset_recovery) { + adapter->force_reset_recovery = false; + rc = do_hard_reset(adapter, rwi, reset_state); + } else { + rc = do_reset(adapter, rwi, reset_state); + } kfree(rwi); - if (rc && rc != IBMVNIC_INIT_FAILED) + if (rc && rc != IBMVNIC_INIT_FAILED && + !adapter->force_reset_recovery) break; rwi = get_next_rwi(adapter); @@ -1951,9 +2036,9 @@ static void __ibmvnic_reset(struct work_struct *work) static int ibmvnic_reset(struct ibmvnic_adapter *adapter, enum ibmvnic_reset_reason reason) { + struct list_head *entry, *tmp_entry; struct ibmvnic_rwi *rwi, *tmp; struct net_device *netdev = adapter->netdev; - struct list_head *entry; int ret; if (adapter->state == VNIC_REMOVING || @@ -1989,7 +2074,13 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, ret = ENOMEM; goto err; } - + /* if we just received a transport event, + * flush reset queue and process this reset + */ + if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { + list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) + list_del(entry); + } rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); mutex_unlock(&adapter->rwi_lock); @@ -4271,6 +4362,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, case IBMVNIC_CRQ_XPORT_EVENT: netif_carrier_off(netdev); adapter->crq.active = false; + if (adapter->resetting) + adapter->force_reset_recovery = true; if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { dev_info(dev, "Migrated, re-enabling adapter\n"); ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index edfc312d3523..f9fb780102ac 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1109,6 +1109,7 @@ struct ibmvnic_adapter { bool mac_change_pending; bool failover_pending; + bool force_reset_recovery; struct ibmvnic_tunables desired; struct ibmvnic_tunables fallback; -- cgit v1.2.3 From d97cde6ab547a8115fb4fae73f030a96519ac3c6 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 23 May 2018 18:02:00 -0700 Subject: hv_netvsc: fix bogus ifalias on network device If the guest network adapter is not configured with DeviceNaming enabled on the host, then the query for friendly name will return success but with a zero length name. Which then leads to a garbage value (stack contents) for ifalias. Fix is simple, just don't set name if host doesn't return it. 
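In other words, the query's output length has to be checked in addition to its status. A condensed sketch of the fixed flow (declarations and the UCS-2 to UTF-8 step are simplified from the driver code shown in the diff below):

	u8 wname[256];		/* stack buffer, not zero-initialised */
	u32 size = sizeof(wname);

	if (rndis_filter_query_device(rndis_device, net_device,
				      RNDIS_OID_GEN_FRIENDLY_NAME,
				      wname, &size) != 0)
		return;		/* host does not support the query */

	if (size == 0)
		return;		/* "success" but nothing written: without this
				 * check the conversion step would read stale
				 * stack bytes into ifalias
				 */

	/* only now does wname hold a valid friendly name */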
Fixes: 0fe554a46a0f ("hv_netvsc: propogate Hyper-V friendly name into interface alias") Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/rndis_filter.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 7f3dab4b4cbc..5428bb261102 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1237,7 +1237,10 @@ static void rndis_get_friendly_name(struct net_device *net, if (rndis_filter_query_device(rndis_device, net_device, RNDIS_OID_GEN_FRIENDLY_NAME, wname, &size) != 0) - return; + return; /* ignore if host does not support */ + + if (size == 0) + return; /* name not set */ /* Convert Windows Unicode string to UTF-8 */ len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias)); -- cgit v1.2.3 From 24f132e29c041cc0654e5aa8ab1127d45d4b46d3 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Wed, 23 May 2018 19:22:48 -0700 Subject: nfp: add ndo_set_mac_address for representors Adding a netdev to a bond requires that its mac address can be modified. The default eth_mac_addr is sufficient to satisfy this requirement. Signed-off-by: John Hurley Reviewed-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 09e87d5f4f72..117eca6819de 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -277,6 +277,7 @@ const struct net_device_ops nfp_repr_netdev_ops = { .ndo_get_vf_config = nfp_app_get_vf_config, .ndo_set_vf_link_state = nfp_app_set_vf_link_state, .ndo_set_features = nfp_port_set_features, + .ndo_set_mac_address = eth_mac_addr, }; static void nfp_repr_clean(struct nfp_repr *repr) -- cgit v1.2.3 From 1945ca7a81d64e09ff0bd24deb64a328614f7b7b Mon Sep 17 00:00:00 2001 From: John Hurley Date: Wed, 23 May 2018 19:22:49 -0700 Subject: nfp: nfpcore: add rtsym writing function Add an rtsym API function that combines the lookup of a symbol and the writing of a value to it. Values can be written as unsigned 32 or 64 bits. Signed-off-by: John Hurley Reviewed-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h | 2 + .../net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 43 ++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index c9724fb7ea4b..df599d5b6bb3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -100,6 +100,8 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name); u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error); +int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, + u64 value); u8 __iomem * nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, unsigned int min_size, struct nfp_cpp_area **area); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 46107aefad1c..9e34216578da 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -286,6 +286,49 @@ exit: return val; } +/** + * nfp_rtsym_write_le() - Write an unsigned scalar value to a symbol + * @rtbl: NFP RTsym table + * @name: Symbol name + * @value: Value to write + * + * Lookup a symbol and write a value to it. Symbol can be 4 or 8 bytes in size. + * If 4 bytes then the lower 32-bits of 'value' are used. Value will be + * written as simple little-endian unsigned value. + * + * Return: 0 on success or error code. + */ +int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, + u64 value) +{ + const struct nfp_rtsym *sym; + int err; + u32 id; + + sym = nfp_rtsym_lookup(rtbl, name); + if (!sym) + return -ENOENT; + + id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); + + switch (sym->size) { + case 4: + err = nfp_cpp_writel(rtbl->cpp, id, sym->addr, value); + break; + case 8: + err = nfp_cpp_writeq(rtbl->cpp, id, sym->addr, value); + break; + default: + nfp_err(rtbl->cpp, + "rtsym '%s' unsupported or non-scalar size: %lld\n", + name, sym->size); + err = -EINVAL; + break; + } + + return err; +} + u8 __iomem * nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, unsigned int min_size, struct nfp_cpp_area **area) -- cgit v1.2.3 From 898bc7d634b4ffbdc3511212625c735d400a8cc6 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Wed, 23 May 2018 19:22:50 -0700 Subject: nfp: flower: check for/turn on LAG support in firmware Check if the fw contains the _abi_flower_balance_sync_enable symbol. If it does then write a 1 to this indicating that the driver is willing to receive NIC to kernel LAG related control messages. If the write is successful, update the list of extra features supported by the fw and add a stub to accept LAG cmsgs. Signed-off-by: John Hurley Reviewed-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 5 +++++ drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 1 + drivers/net/ethernet/netronome/nfp/flower/main.c | 12 ++++++++++++ drivers/net/ethernet/netronome/nfp/flower/main.h | 1 + 4 files changed, 19 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 577659f332e4..03aae2ed9983 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -239,6 +239,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb) static void nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) { + struct nfp_flower_priv *app_priv = app->priv; struct nfp_flower_cmsg_hdr *cmsg_hdr; enum nfp_flower_cmsg_type_port type; @@ -258,6 +259,10 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: nfp_tunnel_keep_alive(app, skb); break; + case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG: + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) + break; + /* fall through */ default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", type); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index bee4367a2c38..3a42a1fc55cb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -366,6 +366,7 @@ struct nfp_flower_cmsg_hdr { enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0, NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2, + NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4, NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6, NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 4e67c0cbf9f0..1910c3e2b3e5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -546,8 +546,20 @@ static int nfp_flower_init(struct nfp_app *app) else app_priv->flower_ext_feats = features; + /* Tell the firmware that the driver supports lag. */ + err = nfp_rtsym_write_le(app->pf->rtbl, + "_abi_flower_balance_sync_enable", 1); + if (!err) + app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG; + else if (err == -ENOENT) + nfp_warn(app->cpp, "LAG not supported by FW.\n"); + else + goto err_cleanup_metadata; + return 0; +err_cleanup_metadata: + nfp_flower_metadata_cleanup(app); err_free_app_priv: vfree(app->priv); return err; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 733ff53cc601..6e82aa4ed84b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -67,6 +67,7 @@ struct nfp_app; /* Extra features bitmap. */ #define NFP_FL_FEATS_GENEVE BIT(0) #define NFP_FL_NBI_MTU_SETTING BIT(1) +#define NFP_FL_FEATS_LAG BIT(31) struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; -- cgit v1.2.3 From b945245297416a3c68ed12f2ada1c7162f5f73fd Mon Sep 17 00:00:00 2001 From: John Hurley Date: Wed, 23 May 2018 19:22:51 -0700 Subject: nfp: flower: add per repr private data for LAG offload Add a bitmap to each flower repr to track its state if it is enslaved by a bond. 
This LAG state may be different to the port state - for example, the port may be up but LAG state may be down due to the selection in an active/backup bond. Signed-off-by: John Hurley Reviewed-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/main.c | 26 ++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/main.h | 8 ++++++++ 2 files changed, 34 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 1910c3e2b3e5..202284b42fd9 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -185,6 +185,10 @@ nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev) static void nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) { + struct nfp_repr *repr = netdev_priv(netdev); + + kfree(repr->app_priv); + tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb, netdev_priv(netdev)); } @@ -225,7 +229,9 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; + struct nfp_flower_repr_priv *repr_priv; enum nfp_port_type port_type; + struct nfp_repr *nfp_repr; struct nfp_reprs *reprs; int i, err, reify_cnt; const u8 queue = 0; @@ -248,6 +254,15 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, goto err_reprs_clean; } + repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); + if (!repr_priv) { + err = -ENOMEM; + goto err_reprs_clean; + } + + nfp_repr = netdev_priv(repr); + nfp_repr->app_priv = repr_priv; + /* For now we only support 1 PF */ WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); @@ -324,6 +339,8 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; + struct nfp_flower_repr_priv *repr_priv; + struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; int err, reify_cnt; @@ -351,6 +368,15 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) goto err_reprs_clean; } + repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); + if (!repr_priv) { + err = -ENOMEM; + goto err_reprs_clean; + } + + nfp_repr = netdev_priv(repr); + nfp_repr->app_priv = repr_priv; + port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 6e82aa4ed84b..7ce255705446 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -160,6 +160,14 @@ struct nfp_flower_priv { struct nfp_mtu_conf mtu_conf; }; +/** + * struct nfp_flower_repr_priv - Flower APP per-repr priv data + * @lag_port_flags: Extended port flags to record lag state of repr + */ +struct nfp_flower_repr_priv { + unsigned long lag_port_flags; +}; + struct nfp_fl_key_ls { u32 key_layer_two; u8 key_layer; -- cgit v1.2.3 From f44aa9ef7950a56daa3a5b41f069761f945f1a1f Mon Sep 17 00:00:00 2001 From: John Hurley Date: Wed, 23 May 2018 19:22:52 -0700 Subject: net: include hash policy in LAG changeupper info LAG upper event notifiers contain the tx type used by the LAG device. Extend this to also include the hash policy used for tx types that utilize hashing. 
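On the consumer side, a driver watching NETDEV_CHANGEUPPER can now key its offload decision on both fields. A sketch, assuming an illustrative foo_lag_offloadable() helper and an example policy of active-backup plus L3/L4 or encapsulated L3/L4 hashing (this mirrors the check the nfp flower driver adds in a later patch, but is not taken from it verbatim):

#include <linux/netdevice.h>

static bool foo_lag_offloadable(struct netdev_notifier_changeupper_info *info)
{
	struct netdev_lag_upper_info *lui = info->upper_info;

	if (!netif_is_lag_master(info->upper_dev) || !lui)
		return false;

	if (lui->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP)
		return true;

	/* hashed bonds are only usable if the hash is one we can reproduce */
	return lui->tx_type == NETDEV_LAG_TX_TYPE_HASH &&
	       (lui->hash_type == NETDEV_LAG_HASH_L34 ||
		lui->hash_type == NETDEV_LAG_HASH_E34);
}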
Signed-off-by: John Hurley Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 27 ++++++++++++++++++++++++++- drivers/net/team/team.c | 1 + include/linux/netdevice.h | 11 +++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index fea17b92b1ae..bd53a71f6b00 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1218,12 +1218,37 @@ static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) } } +static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond, + enum netdev_lag_tx_type type) +{ + if (type != NETDEV_LAG_TX_TYPE_HASH) + return NETDEV_LAG_HASH_NONE; + + switch (bond->params.xmit_policy) { + case BOND_XMIT_POLICY_LAYER2: + return NETDEV_LAG_HASH_L2; + case BOND_XMIT_POLICY_LAYER34: + return NETDEV_LAG_HASH_L34; + case BOND_XMIT_POLICY_LAYER23: + return NETDEV_LAG_HASH_L23; + case BOND_XMIT_POLICY_ENCAP23: + return NETDEV_LAG_HASH_E23; + case BOND_XMIT_POLICY_ENCAP34: + return NETDEV_LAG_HASH_E34; + default: + return NETDEV_LAG_HASH_UNKNOWN; + } +} + static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave, struct netlink_ext_ack *extack) { struct netdev_lag_upper_info lag_upper_info; + enum netdev_lag_tx_type type; - lag_upper_info.tx_type = bond_lag_tx_type(bond); + type = bond_lag_tx_type(bond); + lag_upper_info.tx_type = type; + lag_upper_info.hash_type = bond_lag_hash_type(bond, type); return netdev_master_upper_dev_link(slave->dev, bond->dev, slave, &lag_upper_info, extack); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d6ff881165d0..e6730a01d130 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1129,6 +1129,7 @@ static int team_upper_dev_link(struct team *team, struct team_port *port, int err; lag_upper_info.tx_type = team->mode->lag_tx_type; + lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN; err = netdev_master_upper_dev_link(port->dev, team->dev, NULL, &lag_upper_info, extack); if (err) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index debdb6286170..8452f72087ef 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2332,8 +2332,19 @@ enum netdev_lag_tx_type { NETDEV_LAG_TX_TYPE_HASH, }; +enum netdev_lag_hash { + NETDEV_LAG_HASH_NONE, + NETDEV_LAG_HASH_L2, + NETDEV_LAG_HASH_L34, + NETDEV_LAG_HASH_L23, + NETDEV_LAG_HASH_E23, + NETDEV_LAG_HASH_E34, + NETDEV_LAG_HASH_UNKNOWN, +}; + struct netdev_lag_upper_info { enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; }; struct netdev_lag_lower_state_info { -- cgit v1.2.3 From bb9a8d031140f186d13d82f57b0f5646d596652f Mon Sep 17 00:00:00 2001 From: John Hurley Date: Wed, 23 May 2018 19:22:53 -0700 Subject: nfp: flower: monitor and offload LAG groups Monitor LAG events via the NETDEV_CHANGEUPPER/NETDEV_CHANGELOWERSTATE notifiers to maintain a list of offloadable groups. Sync these groups with HW via a delayed workqueue to prevent excessive re-configuration. When the workqueue is triggered it may generate multiple control messages for different groups. These messages are linked via a batch ID and flags to indicate a new batch and the end of a batch. Update private data in each repr to track their LAG lower state flags. The state of a repr is used to determine the active netdevs that can be offloaded. For example, in active-backup mode, we only offload the netdev currently active. 
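The delayed-workqueue debouncing mentioned here follows a common pattern: notifier callbacks only record what changed and arm a short delay, and the work function later pushes a single batched update. A stripped-down sketch under that assumption (foo_* names are illustrative; the real batching and group bookkeeping live in nfp_fl_lag_do_work() in the diff below):

#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct foo_lag {
	struct notifier_block nb;
	struct delayed_work work;
	struct mutex lock;
	/* recorded group/port state would live here */
};

static void foo_lag_do_work(struct work_struct *work)
{
	struct foo_lag *lag = container_of(to_delayed_work(work),
					   struct foo_lag, work);

	mutex_lock(&lag->lock);
	/* walk the recorded groups and emit one batched config to firmware */
	mutex_unlock(&lag->lock);
}

static int foo_lag_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct foo_lag *lag = container_of(nb, struct foo_lag, nb);

	switch (event) {
	case NETDEV_CHANGEUPPER:
	case NETDEV_CHANGELOWERSTATE:
		/* record the change under lag->lock, then queue a short
		 * delayed sync; if one is already pending, the whole burst
		 * of notifications collapses into that single run
		 */
		schedule_delayed_work(&lag->work, msecs_to_jiffies(2));
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}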
Signed-off-by: John Hurley Reviewed-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + .../net/ethernet/netronome/nfp/flower/lag_conf.c | 589 +++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/main.c | 29 +- drivers/net/ethernet/netronome/nfp/flower/main.h | 30 ++ 4 files changed, 646 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 6373f56205fd..4afb10375397 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -37,6 +37,7 @@ ifeq ($(CONFIG_NFP_APP_FLOWER),y) nfp-objs += \ flower/action.o \ flower/cmsg.o \ + flower/lag_conf.o \ flower/main.o \ flower/match.o \ flower/metadata.o \ diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c new file mode 100644 index 000000000000..35a700b879d7 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -0,0 +1,589 @@ +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "main.h" + +/* LAG group config flags. */ +#define NFP_FL_LAG_LAST BIT(1) +#define NFP_FL_LAG_FIRST BIT(2) +#define NFP_FL_LAG_SWITCH BIT(6) +#define NFP_FL_LAG_RESET BIT(7) + +/* LAG port state flags. 
*/ +#define NFP_PORT_LAG_LINK_UP BIT(0) +#define NFP_PORT_LAG_TX_ENABLED BIT(1) +#define NFP_PORT_LAG_CHANGED BIT(2) + +enum nfp_fl_lag_batch { + NFP_FL_LAG_BATCH_FIRST, + NFP_FL_LAG_BATCH_MEMBER, + NFP_FL_LAG_BATCH_FINISHED +}; + +/** + * struct nfp_flower_cmsg_lag_config - control message payload for LAG config + * @ctrl_flags: Configuration flags + * @reserved: Reserved for future use + * @ttl: Time to live of packet - host always sets to 0xff + * @pkt_number: Config message packet number - increment for each message + * @batch_ver: Batch version of messages - increment for each batch of messages + * @group_id: Group ID applicable + * @group_inst: Group instance number - increment when group is reused + * @members: Array of 32-bit words listing all active group members + */ +struct nfp_flower_cmsg_lag_config { + u8 ctrl_flags; + u8 reserved[2]; + u8 ttl; + __be32 pkt_number; + __be32 batch_ver; + __be32 group_id; + __be32 group_inst; + __be32 members[]; +}; + +/** + * struct nfp_fl_lag_group - list entry for each LAG group + * @group_id: Assigned group ID for host/kernel sync + * @group_inst: Group instance in case of ID reuse + * @list: List entry + * @master_ndev: Group master Netdev + * @dirty: Marked if the group needs synced to HW + * @offloaded: Marked if the group is currently offloaded to NIC + * @to_remove: Marked if the group should be removed from NIC + * @to_destroy: Marked if the group should be removed from driver + * @slave_cnt: Number of slaves in group + */ +struct nfp_fl_lag_group { + unsigned int group_id; + u8 group_inst; + struct list_head list; + struct net_device *master_ndev; + bool dirty; + bool offloaded; + bool to_remove; + bool to_destroy; + unsigned int slave_cnt; +}; + +#define NFP_FL_LAG_PKT_NUMBER_MASK GENMASK(30, 0) +#define NFP_FL_LAG_VERSION_MASK GENMASK(22, 0) +#define NFP_FL_LAG_HOST_TTL 0xff + +/* Use this ID with zero members to ack a batch config */ +#define NFP_FL_LAG_SYNC_ID 0 +#define NFP_FL_LAG_GROUP_MIN 1 /* ID 0 reserved */ +#define NFP_FL_LAG_GROUP_MAX 32 /* IDs 1 to 31 are valid */ + +/* wait for more config */ +#define NFP_FL_LAG_DELAY (msecs_to_jiffies(2)) + +static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag) +{ + lag->pkt_num++; + lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK; + + return lag->pkt_num; +} + +static void nfp_fl_increment_version(struct nfp_fl_lag *lag) +{ + /* LSB is not considered by firmware so add 2 for each increment. */ + lag->batch_ver += 2; + lag->batch_ver &= NFP_FL_LAG_VERSION_MASK; + + /* Zero is reserved by firmware. 
*/ + if (!lag->batch_ver) + lag->batch_ver += 2; +} + +static struct nfp_fl_lag_group * +nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master) +{ + struct nfp_fl_lag_group *group; + struct nfp_flower_priv *priv; + int id; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + + id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN, + NFP_FL_LAG_GROUP_MAX, GFP_KERNEL); + if (id < 0) { + nfp_flower_cmsg_warn(priv->app, + "No more bonding groups available\n"); + return ERR_PTR(id); + } + + group = kmalloc(sizeof(*group), GFP_KERNEL); + if (!group) { + ida_simple_remove(&lag->ida_handle, id); + return ERR_PTR(-ENOMEM); + } + + group->group_id = id; + group->master_ndev = master; + group->dirty = true; + group->offloaded = false; + group->to_remove = false; + group->to_destroy = false; + group->slave_cnt = 0; + group->group_inst = ++lag->global_inst; + list_add_tail(&group->list, &lag->group_list); + + return group; +} + +static struct nfp_fl_lag_group * +nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag, + struct net_device *master) +{ + struct nfp_fl_lag_group *entry; + + if (!master) + return NULL; + + list_for_each_entry(entry, &lag->group_list, list) + if (entry->master_ndev == master) + return entry; + + return NULL; +} + +static int +nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group, + struct net_device **active_members, + unsigned int member_cnt, enum nfp_fl_lag_batch *batch) +{ + struct nfp_flower_cmsg_lag_config *cmsg_payload; + struct nfp_flower_priv *priv; + unsigned long int flags; + unsigned int size, i; + struct sk_buff *skb; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt; + skb = nfp_flower_cmsg_alloc(priv->app, size, + NFP_FLOWER_CMSG_TYPE_LAG_CONFIG, + GFP_KERNEL); + if (!skb) + return -ENOMEM; + + cmsg_payload = nfp_flower_cmsg_get_data(skb); + flags = 0; + + /* Increment batch version for each new batch of config messages. */ + if (*batch == NFP_FL_LAG_BATCH_FIRST) { + flags |= NFP_FL_LAG_FIRST; + nfp_fl_increment_version(lag); + *batch = NFP_FL_LAG_BATCH_MEMBER; + } + + /* If it is a reset msg then it is also the end of the batch. */ + if (lag->rst_cfg) { + flags |= NFP_FL_LAG_RESET; + *batch = NFP_FL_LAG_BATCH_FINISHED; + } + + /* To signal the end of a batch, both the switch and last flags are set + * and the the reserved SYNC group ID is used. 
+ */ + if (*batch == NFP_FL_LAG_BATCH_FINISHED) { + flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST; + lag->rst_cfg = false; + cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID); + cmsg_payload->group_inst = 0; + } else { + cmsg_payload->group_id = cpu_to_be32(group->group_id); + cmsg_payload->group_inst = cpu_to_be32(group->group_inst); + } + + cmsg_payload->reserved[0] = 0; + cmsg_payload->reserved[1] = 0; + cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL; + cmsg_payload->ctrl_flags = flags; + cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver); + cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag)); + + for (i = 0; i < member_cnt; i++) + cmsg_payload->members[i] = + cpu_to_be32(nfp_repr_get_port_id(active_members[i])); + + nfp_ctrl_tx(priv->app->ctrl, skb); + return 0; +} + +static void nfp_fl_lag_do_work(struct work_struct *work) +{ + enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST; + struct nfp_fl_lag_group *entry, *storage; + struct delayed_work *delayed_work; + struct nfp_flower_priv *priv; + struct nfp_fl_lag *lag; + int err; + + delayed_work = to_delayed_work(work); + lag = container_of(delayed_work, struct nfp_fl_lag, work); + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + + mutex_lock(&lag->lock); + list_for_each_entry_safe(entry, storage, &lag->group_list, list) { + struct net_device *iter_netdev, **acti_netdevs; + struct nfp_flower_repr_priv *repr_priv; + int active_count = 0, slaves = 0; + struct nfp_repr *repr; + unsigned long *flags; + + if (entry->to_remove) { + /* Active count of 0 deletes group on hw. */ + err = nfp_fl_lag_config_group(lag, entry, NULL, 0, + &batch); + if (!err) { + entry->to_remove = false; + entry->offloaded = false; + } else { + nfp_flower_cmsg_warn(priv->app, + "group delete failed\n"); + schedule_delayed_work(&lag->work, + NFP_FL_LAG_DELAY); + continue; + } + + if (entry->to_destroy) { + ida_simple_remove(&lag->ida_handle, + entry->group_id); + list_del(&entry->list); + kfree(entry); + } + continue; + } + + acti_netdevs = kmalloc_array(entry->slave_cnt, + sizeof(*acti_netdevs), GFP_KERNEL); + + /* Include sanity check in the loop. It may be that a bond has + * changed between processing the last notification and the + * work queue triggering. If the number of slaves has changed + * or it now contains netdevs that cannot be offloaded, ignore + * the group until pending notifications are processed. + */ + rcu_read_lock(); + for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) { + if (!nfp_netdev_is_nfp_repr(iter_netdev)) { + slaves = 0; + break; + } + + repr = netdev_priv(iter_netdev); + + if (repr->app != priv->app) { + slaves = 0; + break; + } + + slaves++; + if (slaves > entry->slave_cnt) + break; + + /* Check the ports for state changes. 
*/ + repr_priv = repr->app_priv; + flags = &repr_priv->lag_port_flags; + + if (*flags & NFP_PORT_LAG_CHANGED) { + *flags &= ~NFP_PORT_LAG_CHANGED; + entry->dirty = true; + } + + if ((*flags & NFP_PORT_LAG_TX_ENABLED) && + (*flags & NFP_PORT_LAG_LINK_UP)) + acti_netdevs[active_count++] = iter_netdev; + } + rcu_read_unlock(); + + if (slaves != entry->slave_cnt || !entry->dirty) { + kfree(acti_netdevs); + continue; + } + + err = nfp_fl_lag_config_group(lag, entry, acti_netdevs, + active_count, &batch); + if (!err) { + entry->offloaded = true; + entry->dirty = false; + } else { + nfp_flower_cmsg_warn(priv->app, + "group offload failed\n"); + schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); + } + + kfree(acti_netdevs); + } + + /* End the config batch if at least one packet has been batched. */ + if (batch == NFP_FL_LAG_BATCH_MEMBER) { + batch = NFP_FL_LAG_BATCH_FINISHED; + err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch); + if (err) + nfp_flower_cmsg_warn(priv->app, + "group batch end cmsg failed\n"); + } + + mutex_unlock(&lag->lock); +} + +static void +nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag, + struct nfp_fl_lag_group *group) +{ + group->to_remove = true; + + schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); +} + +static int +nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag, + struct net_device *master) +{ + struct nfp_fl_lag_group *group; + + mutex_lock(&lag->lock); + group = nfp_fl_lag_find_group_for_master_with_lag(lag, master); + if (!group) { + mutex_unlock(&lag->lock); + return -ENOENT; + } + + group->to_remove = true; + group->to_destroy = true; + mutex_unlock(&lag->lock); + + schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); + return 0; +} + +static int +nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *upper = info->upper_dev, *iter_netdev; + struct netdev_lag_upper_info *lag_upper_info; + struct nfp_fl_lag_group *group; + struct nfp_flower_priv *priv; + unsigned int slave_count = 0; + bool can_offload = true; + struct nfp_repr *repr; + + if (!netif_is_lag_master(upper)) + return 0; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper, iter_netdev) { + if (!nfp_netdev_is_nfp_repr(iter_netdev)) { + can_offload = false; + break; + } + repr = netdev_priv(iter_netdev); + + /* Ensure all ports are created by the same app/on same card. */ + if (repr->app != priv->app) { + can_offload = false; + break; + } + + slave_count++; + } + rcu_read_unlock(); + + lag_upper_info = info->upper_info; + + /* Firmware supports active/backup and L3/L4 hash bonds. */ + if (lag_upper_info && + lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && + (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH || + (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 && + lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) { + can_offload = false; + nfp_flower_cmsg_warn(priv->app, + "Unable to offload tx_type %u hash %u\n", + lag_upper_info->tx_type, + lag_upper_info->hash_type); + } + + mutex_lock(&lag->lock); + group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper); + + if (slave_count == 0 || !can_offload) { + /* Cannot offload the group - remove if previously offloaded. 
*/ + if (group && group->offloaded) + nfp_fl_lag_schedule_group_remove(lag, group); + + mutex_unlock(&lag->lock); + return 0; + } + + if (!group) { + group = nfp_fl_lag_group_create(lag, upper); + if (IS_ERR(group)) { + mutex_unlock(&lag->lock); + return PTR_ERR(group); + } + } + + group->dirty = true; + group->slave_cnt = slave_count; + + /* Group may have been on queue for removal but is now offfloable. */ + group->to_remove = false; + mutex_unlock(&lag->lock); + + schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); + return 0; +} + +static int +nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag_lower_info; + struct nfp_flower_repr_priv *repr_priv; + struct nfp_flower_priv *priv; + struct nfp_repr *repr; + unsigned long *flags; + + if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev)) + return 0; + + lag_lower_info = info->lower_state_info; + if (!lag_lower_info) + return 0; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + repr = netdev_priv(netdev); + + /* Verify that the repr is associated with this app. */ + if (repr->app != priv->app) + return 0; + + repr_priv = repr->app_priv; + flags = &repr_priv->lag_port_flags; + + mutex_lock(&lag->lock); + if (lag_lower_info->link_up) + *flags |= NFP_PORT_LAG_LINK_UP; + else + *flags &= ~NFP_PORT_LAG_LINK_UP; + + if (lag_lower_info->tx_enabled) + *flags |= NFP_PORT_LAG_TX_ENABLED; + else + *flags &= ~NFP_PORT_LAG_TX_ENABLED; + + *flags |= NFP_PORT_LAG_CHANGED; + mutex_unlock(&lag->lock); + + schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); + return 0; +} + +static int +nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct net_device *netdev; + struct nfp_fl_lag *lag; + int err; + + netdev = netdev_notifier_info_to_dev(ptr); + lag = container_of(nb, struct nfp_fl_lag, lag_nb); + + switch (event) { + case NETDEV_CHANGEUPPER: + err = nfp_fl_lag_changeupper_event(lag, ptr); + if (err) + return NOTIFY_BAD; + return NOTIFY_OK; + case NETDEV_CHANGELOWERSTATE: + err = nfp_fl_lag_changels_event(lag, netdev, ptr); + if (err) + return NOTIFY_BAD; + return NOTIFY_OK; + case NETDEV_UNREGISTER: + if (netif_is_bond_master(netdev)) { + err = nfp_fl_lag_schedule_group_delete(lag, netdev); + if (err) + return NOTIFY_BAD; + return NOTIFY_OK; + } + } + + return NOTIFY_DONE; +} + +int nfp_flower_lag_reset(struct nfp_fl_lag *lag) +{ + enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST; + + lag->rst_cfg = true; + return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch); +} + +void nfp_flower_lag_init(struct nfp_fl_lag *lag) +{ + INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work); + INIT_LIST_HEAD(&lag->group_list); + mutex_init(&lag->lock); + ida_init(&lag->ida_handle); + + /* 0 is a reserved batch version so increment to first valid value. */ + nfp_fl_increment_version(lag); + + lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event; +} + +void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag) +{ + struct nfp_fl_lag_group *entry, *storage; + + cancel_delayed_work_sync(&lag->work); + + /* Remove all groups. 
*/ + mutex_lock(&lag->lock); + list_for_each_entry_safe(entry, storage, &lag->group_list, list) { + list_del(&entry->list); + kfree(entry); + } + mutex_unlock(&lag->lock); + mutex_destroy(&lag->lock); + ida_destroy(&lag->ida_handle); +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 202284b42fd9..19cfa162ac65 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -575,12 +575,14 @@ static int nfp_flower_init(struct nfp_app *app) /* Tell the firmware that the driver supports lag. */ err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_balance_sync_enable", 1); - if (!err) + if (!err) { app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG; - else if (err == -ENOENT) + nfp_flower_lag_init(&app_priv->nfp_lag); + } else if (err == -ENOENT) { nfp_warn(app->cpp, "LAG not supported by FW.\n"); - else + } else { goto err_cleanup_metadata; + } return 0; @@ -599,6 +601,9 @@ static void nfp_flower_clean(struct nfp_app *app) skb_queue_purge(&app_priv->cmsg_skbs_low); flush_work(&app_priv->cmsg_work); + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) + nfp_flower_lag_cleanup(&app_priv->nfp_lag); + nfp_flower_metadata_cleanup(app); vfree(app->priv); app->priv = NULL; @@ -665,11 +670,29 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, static int nfp_flower_start(struct nfp_app *app) { + struct nfp_flower_priv *app_priv = app->priv; + int err; + + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + err = nfp_flower_lag_reset(&app_priv->nfp_lag); + if (err) + return err; + + err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb); + if (err) + return err; + } + return nfp_tunnel_config_start(app); } static void nfp_flower_stop(struct nfp_app *app) { + struct nfp_flower_priv *app_priv = app->priv; + + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) + unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb); + nfp_tunnel_config_stop(app); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 7ce255705446..e03efb034948 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -43,6 +43,7 @@ #include #include #include +#include struct net_device; struct nfp_app; @@ -97,6 +98,30 @@ struct nfp_mtu_conf { spinlock_t lock; }; +/** + * struct nfp_fl_lag - Flower APP priv data for link aggregation + * @lag_nb: Notifier to track master/slave events + * @work: Work queue for writing configs to the HW + * @lock: Lock to protect lag_group_list + * @group_list: List of all master/slave groups offloaded + * @ida_handle: IDA to handle group ids + * @pkt_num: Incremented for each config packet sent + * @batch_ver: Incremented for each batch of config packets + * @global_inst: Instance allocator for groups + * @rst_cfg: Marker to reset HW LAG config + */ +struct nfp_fl_lag { + struct notifier_block lag_nb; + struct delayed_work work; + struct mutex lock; + struct list_head group_list; + struct ida ida_handle; + unsigned int pkt_num; + unsigned int batch_ver; + u8 global_inst; + bool rst_cfg; +}; + /** * struct nfp_flower_priv - Flower APP per-vNIC priv data * @app: Back pointer to app @@ -129,6 +154,7 @@ struct nfp_mtu_conf { * from firmware for repr reify * @reify_wait_queue: wait queue for repr reify response counting * @mtu_conf: Configuration of repr MTU value + * @nfp_lag: Link aggregation data block */ struct 
nfp_flower_priv { struct nfp_app *app; @@ -158,6 +184,7 @@ struct nfp_flower_priv { atomic_t reify_replies; wait_queue_head_t reify_wait_queue; struct nfp_mtu_conf mtu_conf; + struct nfp_fl_lag nfp_lag; }; /** @@ -250,5 +277,8 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb); void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb); int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, void *cb_priv); +void nfp_flower_lag_init(struct nfp_fl_lag *lag); +void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag); +int nfp_flower_lag_reset(struct nfp_fl_lag *lag); #endif -- cgit v1.2.3 From 2e1cc5226b44100696dcab2d59d6fbc789db6153 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Wed, 23 May 2018 19:22:54 -0700 Subject: nfp: flower: implement host cmsg handler for LAG Adds the control message handler to synchronize offloaded group config with that of the kernel. Such messages are sent from fw to driver and feature the following 3 flags: - Data: an attached cmsg could not be processed - store for retransmission - Xon: FW can accept new messages - retransmit any stored cmsgs - Sync: full sync requested so retransmit all kernel LAG group info Signed-off-by: John Hurley Reviewed-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 8 +- .../net/ethernet/netronome/nfp/flower/lag_conf.c | 95 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/main.h | 4 + 3 files changed, 105 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 03aae2ed9983..cb8565222621 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -242,6 +242,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) struct nfp_flower_priv *app_priv = app->priv; struct nfp_flower_cmsg_hdr *cmsg_hdr; enum nfp_flower_cmsg_type_port type; + bool skb_stored = false; cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); @@ -260,8 +261,10 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) nfp_tunnel_keep_alive(app, skb); break; case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG: - if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + skb_stored = nfp_flower_lag_unprocessed_msg(app, skb); break; + } /* fall through */ default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", @@ -269,7 +272,8 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) goto out; } - dev_consume_skb_any(skb); + if (!skb_stored) + dev_consume_skb_any(skb); return; out: dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index 35a700b879d7..a09fe2778250 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -36,6 +36,9 @@ /* LAG group config flags. 
*/ #define NFP_FL_LAG_LAST BIT(1) #define NFP_FL_LAG_FIRST BIT(2) +#define NFP_FL_LAG_DATA BIT(3) +#define NFP_FL_LAG_XON BIT(4) +#define NFP_FL_LAG_SYNC BIT(5) #define NFP_FL_LAG_SWITCH BIT(6) #define NFP_FL_LAG_RESET BIT(7) @@ -108,6 +111,8 @@ struct nfp_fl_lag_group { /* wait for more config */ #define NFP_FL_LAG_DELAY (msecs_to_jiffies(2)) +#define NFP_FL_LAG_RETRANS_LIMIT 100 /* max retrans cmsgs to store */ + static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag) { lag->pkt_num++; @@ -360,6 +365,92 @@ static void nfp_fl_lag_do_work(struct work_struct *work) mutex_unlock(&lag->lock); } +static int +nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb) +{ + struct nfp_flower_cmsg_lag_config *cmsg_payload; + + cmsg_payload = nfp_flower_cmsg_get_data(skb); + if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX) + return -EINVAL; + + /* Drop cmsg retrans if storage limit is exceeded to prevent + * overloading. If the fw notices that expected messages have not been + * received in a given time block, it will request a full resync. + */ + if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT) + return -ENOSPC; + + __skb_queue_tail(&lag->retrans_skbs, skb); + + return 0; +} + +static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag) +{ + struct nfp_flower_priv *priv; + struct sk_buff *skb; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + + while ((skb = __skb_dequeue(&lag->retrans_skbs))) + nfp_ctrl_tx(priv->app->ctrl, skb); +} + +bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_flower_cmsg_lag_config *cmsg_payload; + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_lag_group *group_entry; + unsigned long int flags; + bool store_skb = false; + int err; + + cmsg_payload = nfp_flower_cmsg_get_data(skb); + flags = cmsg_payload->ctrl_flags; + + /* Note the intentional fall through below. If DATA and XON are both + * set, the message will stored and sent again with the rest of the + * unprocessed messages list. + */ + + /* Store */ + if (flags & NFP_FL_LAG_DATA) + if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb)) + store_skb = true; + + /* Send stored */ + if (flags & NFP_FL_LAG_XON) + nfp_fl_send_unprocessed(&priv->nfp_lag); + + /* Resend all */ + if (flags & NFP_FL_LAG_SYNC) { + /* To resend all config: + * 1) Clear all unprocessed messages + * 2) Mark all groups dirty + * 3) Reset NFP group config + * 4) Schedule a LAG config update + */ + + __skb_queue_purge(&priv->nfp_lag.retrans_skbs); + + mutex_lock(&priv->nfp_lag.lock); + list_for_each_entry(group_entry, &priv->nfp_lag.group_list, + list) + group_entry->dirty = true; + + err = nfp_flower_lag_reset(&priv->nfp_lag); + if (err) + nfp_flower_cmsg_warn(priv->app, + "mem err in group reset msg\n"); + mutex_unlock(&priv->nfp_lag.lock); + + schedule_delayed_work(&priv->nfp_lag.work, 0); + } + + return store_skb; +} + static void nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group) @@ -565,6 +656,8 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag) mutex_init(&lag->lock); ida_init(&lag->ida_handle); + __skb_queue_head_init(&lag->retrans_skbs); + /* 0 is a reserved batch version so increment to first valid value. */ nfp_fl_increment_version(lag); @@ -577,6 +670,8 @@ void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag) cancel_delayed_work_sync(&lag->work); + __skb_queue_purge(&lag->retrans_skbs); + /* Remove all groups. 
*/ mutex_lock(&lag->lock); list_for_each_entry_safe(entry, storage, &lag->group_list, list) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index e03efb034948..2fd75c155ccb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -109,6 +109,8 @@ struct nfp_mtu_conf { * @batch_ver: Incremented for each batch of config packets * @global_inst: Instance allocator for groups * @rst_cfg: Marker to reset HW LAG config + * @retrans_skbs: Cmsgs that could not be processed by HW and require + * retransmission */ struct nfp_fl_lag { struct notifier_block lag_nb; @@ -120,6 +122,7 @@ struct nfp_fl_lag { unsigned int batch_ver; u8 global_inst; bool rst_cfg; + struct sk_buff_head retrans_skbs; }; /** @@ -280,5 +283,6 @@ int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, void nfp_flower_lag_init(struct nfp_fl_lag *lag); void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag); int nfp_flower_lag_reset(struct nfp_fl_lag *lag); +bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb); #endif -- cgit v1.2.3 From 7e24a59311ea4a92c38f76756496b06293c50afb Mon Sep 17 00:00:00 2001 From: John Hurley Date: Wed, 23 May 2018 19:22:55 -0700 Subject: nfp: flower: compute link aggregation action If the egress device of an offloaded rule is a LAG port, then encode the output port to the NFP with a LAG identifier and the offloaded group ID. A prelag action is also offloaded which must be the first action of the series (although may appear after other pre-actions - e.g. tunnels). This causes the FW to check that it has the necessary information to output to the requested LAG port. If it does not, the packet is sent to the kernel before any other actions are applied to it. Signed-off-by: John Hurley Reviewed-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 131 +++++++++++++++++---- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 13 ++ .../net/ethernet/netronome/nfp/flower/lag_conf.c | 42 +++++++ drivers/net/ethernet/netronome/nfp/flower/main.h | 9 +- .../net/ethernet/netronome/nfp/flower/offload.c | 2 +- 5 files changed, 169 insertions(+), 28 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 80df9a5d4217..4a6d2db75071 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -72,6 +72,42 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } +static int +nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, + struct nfp_fl_payload *nfp_flow, int act_len) +{ + size_t act_size = sizeof(struct nfp_fl_pre_lag); + struct nfp_fl_pre_lag *pre_lag; + struct net_device *out_dev; + int err; + + out_dev = tcf_mirred_dev(action); + if (!out_dev || !netif_is_lag_master(out_dev)) + return 0; + + if (act_len + act_size > NFP_FL_MAX_A_SIZ) + return -EOPNOTSUPP; + + /* Pre_lag action must be first on action list. + * If other actions already exist they need pushed forward. 
+ */ + if (act_len) + memmove(nfp_flow->action_data + act_size, + nfp_flow->action_data, act_len); + + pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data; + err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag); + if (err) + return err; + + pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG; + pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ; + + nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + return act_size; +} + static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev, enum nfp_flower_tun_type tun_type) { @@ -88,12 +124,13 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev, } static int -nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action, - struct nfp_fl_payload *nfp_flow, bool last, - struct net_device *in_dev, enum nfp_flower_tun_type tun_type, - int *tun_out_cnt) +nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, + const struct tc_action *action, struct nfp_fl_payload *nfp_flow, + bool last, struct net_device *in_dev, + enum nfp_flower_tun_type tun_type, int *tun_out_cnt) { size_t act_size = sizeof(struct nfp_fl_output); + struct nfp_flower_priv *priv = app->priv; struct net_device *out_dev; u16 tmp_flags; @@ -118,6 +155,15 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action, output->flags = cpu_to_be16(tmp_flags | NFP_FL_OUT_FLAGS_USE_TUN); output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type); + } else if (netif_is_lag_master(out_dev) && + priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + int gid; + + output->flags = cpu_to_be16(tmp_flags); + gid = nfp_flower_lag_get_output_id(app, out_dev); + if (gid < 0) + return gid; + output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid); } else { /* Set action output parameters. */ output->flags = cpu_to_be16(tmp_flags); @@ -164,7 +210,7 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len) struct nfp_fl_pre_tunnel *pre_tun_act; /* Pre_tunnel action must be first on action list. - * If other actions already exist they need pushed forward. + * If other actions already exist they need to be pushed forward. */ if (act_len) memmove(act_data + act_size, act_data, act_len); @@ -443,42 +489,73 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) } static int -nfp_flower_loop_action(const struct tc_action *a, +nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, + struct nfp_fl_payload *nfp_fl, int *a_len, + struct net_device *netdev, bool last, + enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, + int *out_cnt) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_output *output; + int err, prelag_size; + + if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) + return -EOPNOTSUPP; + + output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; + err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type, + tun_out_cnt); + if (err) + return err; + + *a_len += sizeof(struct nfp_fl_output); + + if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + /* nfp_fl_pre_lag returns -err or size of prelag action added. + * This will be 0 if it is not egressing to a lag dev. 
+ */ + prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len); + if (prelag_size < 0) + return prelag_size; + else if (prelag_size > 0 && (!last || *out_cnt)) + return -EOPNOTSUPP; + + *a_len += prelag_size; + } + (*out_cnt)++; + + return 0; +} + +static int +nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, - enum nfp_flower_tun_type *tun_type, int *tun_out_cnt) + enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, + int *out_cnt) { struct nfp_fl_set_ipv4_udp_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; struct nfp_fl_push_vlan *psh_v; struct nfp_fl_pop_vlan *pop_v; - struct nfp_fl_output *output; int err; if (is_tcf_gact_shot(a)) { nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP); } else if (is_tcf_mirred_egress_redirect(a)) { - if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) - return -EOPNOTSUPP; - - output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type, - tun_out_cnt); + err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, + true, tun_type, tun_out_cnt, + out_cnt); if (err) return err; - *a_len += sizeof(struct nfp_fl_output); } else if (is_tcf_mirred_egress_mirror(a)) { - if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) - return -EOPNOTSUPP; - - output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type, - tun_out_cnt); + err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, + false, tun_type, tun_out_cnt, + out_cnt); if (err) return err; - *a_len += sizeof(struct nfp_fl_output); } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; @@ -535,11 +612,12 @@ nfp_flower_loop_action(const struct tc_action *a, return 0; } -int nfp_flower_compile_action(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_action(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct net_device *netdev, struct nfp_fl_payload *nfp_flow) { - int act_len, act_cnt, err, tun_out_cnt; + int act_len, act_cnt, err, tun_out_cnt, out_cnt; enum nfp_flower_tun_type tun_type; const struct tc_action *a; LIST_HEAD(actions); @@ -550,11 +628,12 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow, act_len = 0; act_cnt = 0; tun_out_cnt = 0; + out_cnt = 0; tcf_exts_to_list(flow->exts, &actions); list_for_each_entry(a, &actions, list) { - err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev, - &tun_type, &tun_out_cnt); + err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev, + &tun_type, &tun_out_cnt, &out_cnt); if (err) return err; act_cnt++; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 3a42a1fc55cb..4a7f3510a296 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -92,6 +92,7 @@ #define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12 #define NFP_FL_ACTION_OPCODE_SET_UDP 14 #define NFP_FL_ACTION_OPCODE_SET_TCP 15 +#define NFP_FL_ACTION_OPCODE_PRE_LAG 16 #define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 #define NFP_FL_ACTION_OPCODE_NUM 32 @@ -103,6 +104,9 @@ #define NFP_FL_PUSH_VLAN_CFI BIT(12) #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) +/* LAG ports */ +#define NFP_FL_LAG_OUT 0xC0DE0000 + /* Tunnel ports */ #define NFP_FL_PORT_TYPE_TUN 0x50000000 #define 
NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) @@ -177,6 +181,15 @@ struct nfp_fl_pop_vlan { __be16 reserved; }; +struct nfp_fl_pre_lag { + struct nfp_fl_act_head head; + __be16 group_id; + u8 lag_version[3]; + u8 instance; +}; + +#define NFP_FL_PRE_LAG_VER_OFF 8 + struct nfp_fl_pre_tunnel { struct nfp_fl_act_head head; __be16 reserved; diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index a09fe2778250..0c4c957717ea 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -184,6 +184,48 @@ nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag, return NULL; } +int nfp_flower_lag_populate_pre_action(struct nfp_app *app, + struct net_device *master, + struct nfp_fl_pre_lag *pre_act) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_lag_group *group = NULL; + __be32 temp_vers; + + mutex_lock(&priv->nfp_lag.lock); + group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag, + master); + if (!group) { + mutex_unlock(&priv->nfp_lag.lock); + return -ENOENT; + } + + pre_act->group_id = cpu_to_be16(group->group_id); + temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver << + NFP_FL_PRE_LAG_VER_OFF); + memcpy(pre_act->lag_version, &temp_vers, 3); + pre_act->instance = group->group_inst; + mutex_unlock(&priv->nfp_lag.lock); + + return 0; +} + +int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_lag_group *group = NULL; + int group_id = -ENOENT; + + mutex_lock(&priv->nfp_lag.lock); + group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag, + master); + if (group) + group_id = group->group_id; + mutex_unlock(&priv->nfp_lag.lock); + + return group_id; +} + static int nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group, struct net_device **active_members, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 2fd75c155ccb..bbe5764d26cb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -45,6 +45,7 @@ #include #include +struct nfp_fl_pre_lag; struct net_device; struct nfp_app; @@ -253,7 +254,8 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type); -int nfp_flower_compile_action(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_action(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct net_device *netdev, struct nfp_fl_payload *nfp_flow); int nfp_compile_flow_metadata(struct nfp_app *app, @@ -284,5 +286,10 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag); void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag); int nfp_flower_lag_reset(struct nfp_fl_lag *lag); bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb); +int nfp_flower_lag_populate_pre_action(struct nfp_app *app, + struct net_device *master, + struct nfp_fl_pre_lag *pre_act); +int nfp_flower_lag_get_output_id(struct nfp_app *app, + struct net_device *master); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 70ec9d821b91..c42e64f32333 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -440,7 +440,7 @@ 
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - err = nfp_flower_compile_action(flow, netdev, flow_pay); + err = nfp_flower_compile_action(app, flow, netdev, flow_pay); if (err) goto err_destroy_flow; -- cgit v1.2.3 From 46dbf98c69f476a0928393310617f15a9c6469b5 Mon Sep 17 00:00:00 2001 From: Anilkumar Kolli Date: Wed, 23 May 2018 11:09:02 +0300 Subject: ath10k: add memory dump support for QCA9888 and QCA99X0 This patch adds firmware crash memory dump support for QCA9888 and QCA99X0. Tested on: QCA9888 firmware 10.4-3.5.3-00053 QCA99X0 firmware 10.4.1.00030-1 Signed-off-by: Anilkumar Kolli Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/coredump.c | 98 ++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index f90cec0ebb1c..4d28063052fe 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -701,6 +701,89 @@ static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = { }, }; +static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x60000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x98000, + .len = 0x50000, + .name = "IRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOSRAM, + .start = 0xC0000, + .len = 0x40000, + .name = "SRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x30000, + .len = 0x7000, + .name = "APB REG 1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x3f000, + .len = 0x3000, + .name = "APB REG 2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x43000, + .len = 0x3000, + .name = "WIFI REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x4A000, + .len = 0x5000, + .name = "CE REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x80000, + .len = 0x6000, + .name = "SOC REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, @@ -848,6 +931,21 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { .size = ARRAY_SIZE(qca9984_hw10_mem_regions), }, }, + { + .hw_id = QCA9888_HW_2_0_DEV_VERSION, + .region_table = { + .regions = qca9984_hw10_mem_regions, + .size = ARRAY_SIZE(qca9984_hw10_mem_regions), + }, + }, + { + .hw_id = QCA99X0_HW_2_0_DEV_VERSION, + .region_table = { + .regions = qca99x0_hw20_mem_regions, + .size = ARRAY_SIZE(qca99x0_hw20_mem_regions), + }, + }, + }; static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar) -- cgit v1.2.3 From be8cce96f14dc925ecfb702be0392a52cf78adb5 Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Wed, 23 May 2018 11:09:09 +0300 Subject: ath10k: add support to configure channel dwell time Configure channel dwell time from duration of the scan request received from mac80211 when the duration is non-zero. 
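The scan timeout arithmetic that the mac.c hunk below adds to ath10k_hw_scan() can be summarised as a small helper. This is a sketch only -- the helper name, its standalone form and the shortened "overhead" constant are illustrative; the patch itself open-codes the calculation with min_t() and ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD:

    /* Sketch, not driver code: summarises the timeout calculation introduced
     * below.  "overhead" stands in for
     * ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD (10 msec per channel).
     */
    static u32 hw_scan_timeout(u32 duration, u32 n_channels,
                               u32 max_rest_time, u32 max_scan_time)
    {
            u32 overhead = 10; /* msec */
            u32 worst_case;

            /* No duration in the scan request: keep the previous behaviour,
             * i.e. the firmware scan time plus a 200 ms margin for
             * event/command processing.
             */
            if (!duration)
                    return max_scan_time + 200;

            /* Dwell on every channel for the requested duration (plus WMI
             * event overhead) and rest between channel switches, capped at
             * the firmware scan time plus the same 200 ms margin.
             */
            worst_case = max_rest_time * (n_channels - 1) +
                         (duration + overhead) * n_channels;

            return min_t(u32, worst_case, max_scan_time + 200);
    }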
When the scan request does not have duration value, use the default ones, the current implementation. Corresponding flag NL80211_EXT_FEATURE_SET_SCAN_DWELL is advertized. Supported Chipsets: -QCA988X/QCA9887 PCI -QCA99X0/QCA9984/QCA9888/QCA4019 PCI -QCA6174/QCA9377 PCI/USB/SDIO -WCN3990 SNOC Tested on QCA9984 with firmware ver 10.4-3.6-0010 Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/mac.c | 23 ++++++++++++++++++++--- 2 files changed, 21 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index e4ac8f2831fd..eb7ff1116442 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -44,6 +44,7 @@ #define WO(_f) ((_f##_OFFSET) >> 2) #define ATH10K_SCAN_ID 0 +#define ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* msec */ #define WMI_READY_TIMEOUT (5 * HZ) #define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ) #define ATH10K_CONNECTION_LOSS_HZ (3 * HZ) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 487a7a7380fd..e3b67ffdab72 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -5675,6 +5675,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, struct wmi_start_scan_arg arg; int ret = 0; int i; + u32 scan_timeout; mutex_lock(&ar->conf_mutex); @@ -5736,6 +5737,22 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.channels[i] = req->channels[i]->center_freq; } + /* if duration is set, default dwell times will be overwritten */ + if (req->duration) { + arg.dwell_time_active = req->duration; + arg.dwell_time_passive = req->duration; + arg.burst_duration_ms = req->duration; + + scan_timeout = min_t(u32, arg.max_rest_time * + (arg.n_channels - 1) + (req->duration + + ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * + arg.n_channels, arg.max_scan_time + 200); + + } else { + /* Add a 200ms margin to account for event/command processing */ + scan_timeout = arg.max_scan_time + 200; + } + ret = ath10k_start_scan(ar, &arg); if (ret) { ath10k_warn(ar, "failed to start hw scan: %d\n", ret); @@ -5744,10 +5761,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, spin_unlock_bh(&ar->data_lock); } - /* Add a 200ms margin to account for event/command processing */ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, - msecs_to_jiffies(arg.max_scan_time + - 200)); + msecs_to_jiffies(scan_timeout)); exit: mutex_unlock(&ar->conf_mutex); @@ -8364,6 +8379,8 @@ int ath10k_mac_register(struct ath10k *ar) } wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_SET_SCAN_DWELL); /* * on LL hardware queues are managed entirely by the FW -- cgit v1.2.3 From 699e2302c286a14afe7b7394151ce6c4e1790cc1 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:09:53 +0300 Subject: ath: Add regulatory mapping for Bahamas The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. 
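As a rough sketch of the lookup chain that this series of regulatory patches extends: a country code read from the EEPROM is matched against the country table, its regdomain is then matched against the regdomain-pair table, and that entry supplies the per-band conformance test limits. The types and the function below are simplified stand-ins, not the driver API -- the real tables are allCountries[] and regDomainPairs[] in regd_common.h and carry more fields -- but a failed lookup at either step corresponds roughly to the "Invalid EEPROM contents" failure described in these commit messages:

    /* Simplified stand-in types for illustration only. */
    struct country_entry { u16 country_code; u16 regdomain; const char *iso_alpha2; };
    struct regpair_entry { u16 regdomain; u16 ctl_5ghz; u16 ctl_2ghz; };

    static int resolve_ctls(u16 country_code,
                            const struct country_entry *countries, size_t n_countries,
                            const struct regpair_entry *pairs, size_t n_pairs,
                            u16 *ctl_2ghz, u16 *ctl_5ghz)
    {
            size_t i, j;

            for (i = 0; i < n_countries; i++) {
                    if (countries[i].country_code != country_code)
                            continue;

                    /* Country known: now find the CTLs for its regdomain. */
                    for (j = 0; j < n_pairs; j++) {
                            if (pairs[j].regdomain != countries[i].regdomain)
                                    continue;
                            *ctl_2ghz = pairs[j].ctl_2ghz;
                            *ctl_5ghz = pairs[j].ctl_5ghz;
                            return 0;
                    }
            }

            /* Unknown country code or regdomain: no limits can be applied. */
            return -EINVAL;
    }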
If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: ETSI * 5GHz: FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd.h | 1 + drivers/net/wireless/ath/regd_common.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 5d80be213fac..f296ff838d2a 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -68,6 +68,7 @@ enum CountryCode { CTRY_AUSTRALIA = 36, CTRY_AUSTRIA = 40, CTRY_AZERBAIJAN = 31, + CTRY_BAHAMAS = 44, CTRY_BAHRAIN = 48, CTRY_BANGLADESH = 50, CTRY_BARBADOS = 52, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index bdd2b4d61f2f..cde0268cbed6 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -298,6 +298,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS"}, {CTRY_BAHRAIN, APL6_WORLD, "BH"}, {CTRY_BANGLADESH, NULL1_WORLD, "BD"}, {CTRY_BARBADOS, FCC2_WORLD, "BB"}, -- cgit v1.2.3 From 9c790f2d234f65697e3b0948adbfdf36dbe63dd7 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:09:59 +0300 Subject: ath: Add regulatory mapping for Bermuda The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: FCC * 5GHz: FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd.h | 1 + drivers/net/wireless/ath/regd_common.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index f296ff838d2a..e7b43901d195 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -75,6 +75,7 @@ enum CountryCode { CTRY_BELARUS = 112, CTRY_BELGIUM = 56, CTRY_BELIZE = 84, + CTRY_BERMUDA = 60, CTRY_BOLIVIA = 68, CTRY_BOSNIA_HERZ = 70, CTRY_BRAZIL = 76, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index cde0268cbed6..a8c7f306fd7b 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -306,6 +306,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, {CTRY_BELIZE, APL1_ETSIC, "BZ"}, + {CTRY_BERMUDA, FCC3_FCCA, "BM"}, {CTRY_BOLIVIA, APL1_ETSIC, "BO"}, {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, {CTRY_BRAZIL, FCC3_WORLD, "BR"}, -- cgit v1.2.3 From b840fa9123086c485788f9c8e96dd4c18e1154cb Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:10:12 +0300 Subject: ath: Add regulatory mapping for Kenya The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. 
If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: ETSI * 5GHz: FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index a8c7f306fd7b..981f604758b0 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -411,6 +411,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_JORDAN, ETSI2_WORLD, "JO"}, {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ"}, + {CTRY_KENYA, APL1_WORLD, "KE"}, {CTRY_KOREA_NORTH, APL9_WORLD, "KP"}, {CTRY_KOREA_ROC, APL9_WORLD, "KR"}, {CTRY_KOREA_ROC2, APL2_WORLD, "K2"}, -- cgit v1.2.3 From a71c984bc4e5e4f1e94a4c362cce764834352d9a Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:10:19 +0300 Subject: ath: Add regulatory mapping for Mauritius The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: ETSI * 5GHz: ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd.h | 1 + drivers/net/wireless/ath/regd_common.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index e7b43901d195..8fbf0a684bc9 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -138,6 +138,7 @@ enum CountryCode { CTRY_MACEDONIA = 807, CTRY_MALAYSIA = 458, CTRY_MALTA = 470, + CTRY_MAURITIUS = 480, CTRY_MEXICO = 484, CTRY_MONACO = 492, CTRY_MOROCCO = 504, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 981f604758b0..3718bbcdd945 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -426,6 +426,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_MACEDONIA, NULL1_WORLD, "MK"}, {CTRY_MALAYSIA, APL8_WORLD, "MY"}, {CTRY_MALTA, ETSI1_WORLD, "MT"}, + {CTRY_MAURITIUS, ETSI1_WORLD, "MU"}, {CTRY_MEXICO, FCC1_FCCA, "MX"}, {CTRY_MONACO, ETSI4_WORLD, "MC"}, {CTRY_MOROCCO, APL4_WORLD, "MA"}, -- cgit v1.2.3 From a0a6f2a916dccc394404117534e9587866a7eefa Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:10:22 +0300 Subject: ath: Add regulatory mapping for Montenegro The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. 
If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: ETSI * 5GHz: ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd.h | 1 + drivers/net/wireless/ath/regd_common.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 8fbf0a684bc9..8edf6d894519 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -141,6 +141,7 @@ enum CountryCode { CTRY_MAURITIUS = 480, CTRY_MEXICO = 484, CTRY_MONACO = 492, + CTRY_MONTENEGRO = 499, CTRY_MOROCCO = 504, CTRY_NEPAL = 524, CTRY_NETHERLANDS = 528, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 3718bbcdd945..ef636831812a 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -429,6 +429,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_MAURITIUS, ETSI1_WORLD, "MU"}, {CTRY_MEXICO, FCC1_FCCA, "MX"}, {CTRY_MONACO, ETSI4_WORLD, "MC"}, + {CTRY_MONTENEGRO, ETSI1_WORLD, "ME"}, {CTRY_MOROCCO, APL4_WORLD, "MA"}, {CTRY_NEPAL, APL1_WORLD, "NP"}, {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"}, -- cgit v1.2.3 From a20f1338c5de7a62a9b9b4353618e2c336251b3a Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:10:26 +0300 Subject: ath: Add regulatory mapping for Nicaragua The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: FCC * 5GHz: FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index ef636831812a..6a14049f8b8f 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -435,6 +435,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"}, {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"}, {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"}, + {CTRY_NICARAGUA, FCC3_FCCA, "NI"}, {CTRY_NORWAY, ETSI1_WORLD, "NO"}, {CTRY_OMAN, FCC3_WORLD, "OM"}, {CTRY_PAKISTAN, NULL1_WORLD, "PK"}, -- cgit v1.2.3 From 67a956682472c9fef28f1e171a10428c6e7a800f Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:10:29 +0300 Subject: ath: Add regulatory mapping for Paraguya The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. 
If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: ETSI * 5GHz: FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 6a14049f8b8f..ad085c795662 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -441,6 +441,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_PAKISTAN, NULL1_WORLD, "PK"}, {CTRY_PANAMA, FCC1_FCCA, "PA"}, {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"}, + {CTRY_PARAGUAY, FCC3_WORLD, "PY"}, {CTRY_PERU, APL1_WORLD, "PE"}, {CTRY_PHILIPPINES, APL1_WORLD, "PH"}, {CTRY_POLAND, ETSI1_WORLD, "PL"}, -- cgit v1.2.3 From 2a3169a54bb53717928392a04fb84deb765b51f1 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:10:43 +0300 Subject: ath: Add regulatory mapping for Serbia The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: ETSI * 5GHz: ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd.h | 1 + drivers/net/wireless/ath/regd_common.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 8edf6d894519..58afd500bdb3 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -163,6 +163,7 @@ enum CountryCode { CTRY_ROMANIA = 642, CTRY_RUSSIA = 643, CTRY_SAUDI_ARABIA = 682, + CTRY_SERBIA = 688, CTRY_SERBIA_MONTENEGRO = 891, CTRY_SINGAPORE = 702, CTRY_SLOVAKIA = 703, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index ad085c795662..f3764ac9e4e3 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -451,6 +451,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_ROMANIA, NULL1_WORLD, "RO"}, {CTRY_RUSSIA, NULL1_WORLD, "RU"}, {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, + {CTRY_SERBIA, ETSI1_WORLD, "RS"}, {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, {CTRY_SINGAPORE, APL6_WORLD, "SG"}, {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"}, -- cgit v1.2.3 From 667ddac5745fb9fddfe8f7fd2523070f50bd4442 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:10:48 +0300 Subject: ath: Add regulatory mapping for Tanzania The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. 
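One detail worth noting about the enum CountryCode additions in this series: the numeric values are not arbitrary but follow the ISO 3166-1 numeric code of each country, which is why new entries land at scattered positions in the enum. For illustration, with values copied from the hunks in this series and the comments added editorially:

    enum CountryCode {
            /* ... */
            CTRY_BAHAMAS = 44,      /* ISO 3166-1 numeric code for BS */
            CTRY_BERMUDA = 60,      /* ISO 3166-1 numeric code for BM */
            CTRY_MAURITIUS = 480,   /* ISO 3166-1 numeric code for MU */
            CTRY_MONTENEGRO = 499,  /* ISO 3166-1 numeric code for ME */
            CTRY_SERBIA = 688,      /* ISO 3166-1 numeric code for RS */
            /* ... */
    };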
If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: ETSI * 5GHz: FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd.h | 1 + drivers/net/wireless/ath/regd_common.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 58afd500bdb3..894be37344aa 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -175,6 +175,7 @@ enum CountryCode { CTRY_SWITZERLAND = 756, CTRY_SYRIA = 760, CTRY_TAIWAN = 158, + CTRY_TANZANIA = 834, CTRY_THAILAND = 764, CTRY_TRINIDAD_Y_TOBAGO = 780, CTRY_TUNISIA = 788, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index f3764ac9e4e3..64f004ec0ef2 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -463,6 +463,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"}, {CTRY_SYRIA, NULL1_WORLD, "SY"}, {CTRY_TAIWAN, APL3_FCCA, "TW"}, + {CTRY_TANZANIA, APL1_WORLD, "TZ"}, {CTRY_THAILAND, FCC3_WORLD, "TH"}, {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"}, {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, -- cgit v1.2.3 From 1ea3986ad2bc72081c69f3fbc1e5e0eeb3c44f17 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:10:54 +0300 Subject: ath: Add regulatory mapping for Uganda The country code is used by the ath to detect the ISO 3166-1 alpha-2 name and to select the correct conformance test limits (CTL) for a country. If the country isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this country are: * 2.4GHz: ETSI * 5GHz: FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd.h | 1 + drivers/net/wireless/ath/regd_common.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 894be37344aa..d73e45e26547 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -181,6 +181,7 @@ enum CountryCode { CTRY_TUNISIA = 788, CTRY_TURKEY = 792, CTRY_UAE = 784, + CTRY_UGANDA = 800, CTRY_UKRAINE = 804, CTRY_UNITED_KINGDOM = 826, CTRY_UNITED_STATES = 840, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 64f004ec0ef2..f7f0efd437b8 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -468,6 +468,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"}, {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, {CTRY_TURKEY, ETSI3_WORLD, "TR"}, + {CTRY_UGANDA, FCC3_WORLD, "UG"}, {CTRY_UKRAINE, NULL1_WORLD, "UA"}, {CTRY_UAE, NULL1_WORLD, "AE"}, {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, -- cgit v1.2.3 From 4f183687e3fad3ce0e06e38976cad81bc4541990 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:11:05 +0300 Subject: ath: Add regulatory mapping for APL2_FCCA The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. 
If the regdomain code isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this regdomain code are: * 2.4GHz: FCC * 5GHz: FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index f7f0efd437b8..969dc12506ee 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -59,6 +59,7 @@ enum EnumRd { MKK1_MKKA1 = 0x4A, MKK1_MKKA2 = 0x4B, MKK1_MKKC = 0x4C, + APL2_FCCA = 0x4D, APL3_FCCA = 0x50, APL1_WORLD = 0x52, @@ -188,6 +189,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {FCC1_FCCA, CTL_FCC, CTL_FCC}, {APL1_WORLD, CTL_FCC, CTL_ETSI}, {APL2_WORLD, CTL_FCC, CTL_ETSI}, + {APL2_FCCA, CTL_FCC, CTL_FCC}, {APL3_WORLD, CTL_FCC, CTL_ETSI}, {APL4_WORLD, CTL_FCC, CTL_ETSI}, {APL5_WORLD, CTL_FCC, CTL_ETSI}, -- cgit v1.2.3 From 9ba8df0c52b3e6baa436374b429d3d73bd09a320 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:11:14 +0300 Subject: ath: Add regulatory mapping for APL13_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this regdomain code are: * 2.4GHz: ETSI * 5GHz: ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 969dc12506ee..f3969d05c816 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -68,6 +68,7 @@ enum EnumRd { APL1_ETSIC = 0x55, APL2_ETSIC = 0x56, APL5_WORLD = 0x58, + APL13_WORLD = 0x5A, APL6_WORLD = 0x5B, APL7_FCCA = 0x5C, APL8_WORLD = 0x5D, @@ -193,6 +194,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {APL3_WORLD, CTL_FCC, CTL_ETSI}, {APL4_WORLD, CTL_FCC, CTL_ETSI}, {APL5_WORLD, CTL_FCC, CTL_ETSI}, + {APL13_WORLD, CTL_ETSI, CTL_ETSI}, {APL6_WORLD, CTL_ETSI, CTL_ETSI}, {APL8_WORLD, CTL_ETSI, CTL_ETSI}, {APL9_WORLD, CTL_ETSI, CTL_ETSI}, -- cgit v1.2.3 From 45faf6e096da8bb80e1ddf8c08a26a9601d9469e Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:11:18 +0300 Subject: ath: Add regulatory mapping for ETSI8_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. 
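A note on reading the regDomainPairs[] hunks in this group of patches: judging from the per-band limits quoted in the commit messages, each entry lists the regdomain code, then the 5 GHz CTL, then the 2.4 GHz CTL -- the reverse of the "2.4GHz / 5GHz" order used in the prose. For example (entries copied from the hunks in this series, comments added editorially):

    {APL1_WORLD, CTL_FCC, CTL_ETSI},    /* 2.4 GHz: ETSI, 5 GHz: FCC  */
    {APL2_FCCA, CTL_FCC, CTL_FCC},      /* 2.4 GHz: FCC,  5 GHz: FCC  */
    {APL13_WORLD, CTL_ETSI, CTL_ETSI},  /* 2.4 GHz: ETSI, 5 GHz: ETSI */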
If the regdomain code isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this regdomain code are: * 2.4GHz: ETSI * 5GHz: ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index f3969d05c816..95a378e6a88b 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -44,6 +44,7 @@ enum EnumRd { ETSI4_ETSIC = 0x38, ETSI5_WORLD = 0x39, ETSI6_WORLD = 0x34, + ETSI8_WORLD = 0x3D, ETSI_RESERVED = 0x33, MKK1_MKKA = 0x40, @@ -181,6 +182,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {ETSI4_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI5_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI6_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI8_WORLD, CTL_ETSI, CTL_ETSI}, /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */ {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI}, -- cgit v1.2.3 From 897fab6c5d901398beaeb4d764b1816fbed9c488 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:11:24 +0300 Subject: ath: Add regulatory mapping for ETSI9_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this regdomain code are: * 2.4GHz: ETSI * 5GHz: ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 95a378e6a88b..66932c55d352 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -45,6 +45,7 @@ enum EnumRd { ETSI5_WORLD = 0x39, ETSI6_WORLD = 0x34, ETSI8_WORLD = 0x3D, + ETSI9_WORLD = 0x3E, ETSI_RESERVED = 0x33, MKK1_MKKA = 0x40, @@ -183,6 +184,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {ETSI5_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI6_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI8_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI9_WORLD, CTL_ETSI, CTL_ETSI}, /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */ {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI}, -- cgit v1.2.3 From 01fb2994a98dc72c8818c274f7b5983d5dd885c7 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:11:30 +0300 Subject: ath: Add regulatory mapping for FCC3_ETSIC The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. 
If the regdomain code isn't available and it is still programmed in the EEPROM then it will cause an error and stop the initialization with: Invalid EEPROM contents The current CTL mappings for this regdomain code are: * 2.4GHz: ETSI * 5GHz: FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 66932c55d352..437f5dca015a 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -35,6 +35,7 @@ enum EnumRd { FRANCE_RES = 0x31, FCC3_FCCA = 0x3A, FCC3_WORLD = 0x3B, + FCC3_ETSIC = 0x3F, ETSI1_WORLD = 0x37, ETSI3_ETSIA = 0x32, @@ -172,6 +173,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {FCC2_ETSIC, CTL_FCC, CTL_ETSI}, {FCC3_FCCA, CTL_FCC, CTL_FCC}, {FCC3_WORLD, CTL_FCC, CTL_ETSI}, + {FCC3_ETSIC, CTL_FCC, CTL_ETSI}, {FCC4_FCCA, CTL_FCC, CTL_FCC}, {FCC5_FCCA, CTL_FCC, CTL_FCC}, {FCC6_FCCA, CTL_FCC, CTL_FCC}, -- cgit v1.2.3 From fed8f5e8303757dcd6b5a149eb6d6b23b3440e2d Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:11:36 +0300 Subject: ath: Map Albania to ETSI1_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 437f5dca015a..ea0d53e21bae 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -299,7 +299,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { static struct country_code_to_enum_rd allCountries[] = { {CTRY_DEBUG, NO_ENUMRD, "DB"}, {CTRY_DEFAULT, FCC1_FCCA, "CO"}, - {CTRY_ALBANIA, NULL1_WORLD, "AL"}, + {CTRY_ALBANIA, ETSI1_WORLD, "AL"}, {CTRY_ALGERIA, NULL1_WORLD, "DZ"}, {CTRY_ARGENTINA, FCC3_WORLD, "AR"}, {CTRY_ARMENIA, ETSI4_WORLD, "AM"}, -- cgit v1.2.3 From 485daee92bde2b7ec85cb36ab6fdd0f8d2925af7 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:11:45 +0300 Subject: ath: Map Algeria to APL13_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. 
The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index ea0d53e21bae..37dfa164daf0 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -300,7 +300,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_DEBUG, NO_ENUMRD, "DB"}, {CTRY_DEFAULT, FCC1_FCCA, "CO"}, {CTRY_ALBANIA, ETSI1_WORLD, "AL"}, - {CTRY_ALGERIA, NULL1_WORLD, "DZ"}, + {CTRY_ALGERIA, APL13_WORLD, "DZ"}, {CTRY_ARGENTINA, FCC3_WORLD, "AR"}, {CTRY_ARMENIA, ETSI4_WORLD, "AM"}, {CTRY_ARUBA, ETSI1_WORLD, "AW"}, -- cgit v1.2.3 From 8120cc0b215c5100c10a1c9aef19532a28d83d62 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:11:49 +0300 Subject: ath: Map Australia to FCC3_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. This change itself doesn't change the selected CTL of this country and is only required to stay in sync with the QCA mappings. Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 37dfa164daf0..3a6110ba637d 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -304,7 +304,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_ARGENTINA, FCC3_WORLD, "AR"}, {CTRY_ARMENIA, ETSI4_WORLD, "AM"}, {CTRY_ARUBA, ETSI1_WORLD, "AW"}, - {CTRY_AUSTRALIA, FCC2_WORLD, "AU"}, + {CTRY_AUSTRALIA, FCC3_WORLD, "AU"}, {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, -- cgit v1.2.3 From 1507b32891c0f1b5041a61d3c41e93d54602f76d Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:11:55 +0300 Subject: ath: Map Bangladesh to APL1_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. 
The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 3a6110ba637d..d531ae0c9dd6 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -310,7 +310,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, {CTRY_BAHAMAS, FCC3_WORLD, "BS"}, {CTRY_BAHRAIN, APL6_WORLD, "BH"}, - {CTRY_BANGLADESH, NULL1_WORLD, "BD"}, + {CTRY_BANGLADESH, APL1_WORLD, "BD"}, {CTRY_BARBADOS, FCC2_WORLD, "BB"}, {CTRY_BELARUS, ETSI1_WORLD, "BY"}, {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, -- cgit v1.2.3 From 37090d9447050fa6234fa29ecea19165d0b41f64 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:12:01 +0300 Subject: ath: Map Brunei Darussalam to APL6_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: FCC -> ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index d531ae0c9dd6..b8f321664078 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -320,7 +320,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_BOLIVIA, APL1_ETSIC, "BO"}, {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, {CTRY_BRAZIL, FCC3_WORLD, "BR"}, - {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"}, + {CTRY_BRUNEI_DARUSSALAM, APL6_WORLD, "BN"}, {CTRY_BULGARIA, ETSI6_WORLD, "BG"}, {CTRY_CAMBODIA, ETSI1_WORLD, "KH"}, {CTRY_CANADA, FCC3_FCCA, "CA"}, -- cgit v1.2.3 From fed60f81a2d0839b2611929ab60b439e7d3d9714 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:12:05 +0300 Subject: ath: Map Bulgaria to ETSI1_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. This change itself doesn't change the selected CTL of this country and is only required to stay in sync with the QCA mappings. 
Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index b8f321664078..3039b9b50a8c 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -321,7 +321,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, {CTRY_BRAZIL, FCC3_WORLD, "BR"}, {CTRY_BRUNEI_DARUSSALAM, APL6_WORLD, "BN"}, - {CTRY_BULGARIA, ETSI6_WORLD, "BG"}, + {CTRY_BULGARIA, ETSI1_WORLD, "BG"}, {CTRY_CAMBODIA, ETSI1_WORLD, "KH"}, {CTRY_CANADA, FCC3_FCCA, "CA"}, {CTRY_CANADA2, FCC6_FCCA, "CA"}, -- cgit v1.2.3 From 515e968624b2d9fd413dc7c90c24fb3c768a12a7 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:12:11 +0300 Subject: ath: Map Colombia to FCC1_FCCA The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. This change itself doesn't change the selected CTL of this country and is only required to stay in sync with the QCA mappings. Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 3039b9b50a8c..17f7e7768395 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -327,7 +327,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_CANADA2, FCC6_FCCA, "CA"}, {CTRY_CHILE, APL6_WORLD, "CL"}, {CTRY_CHINA, APL1_WORLD, "CN"}, - {CTRY_COLOMBIA, FCC1_FCCA, "CO"}, + {CTRY_COLOMBIA, FCC3_WORLD, "CO"}, {CTRY_COSTA_RICA, FCC1_WORLD, "CR"}, {CTRY_CROATIA, ETSI1_WORLD, "HR"}, {CTRY_CYPRUS, ETSI1_WORLD, "CY"}, -- cgit v1.2.3 From 0fa31a86998b558ce7ef21ef84bde175d9aba11d Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:12:15 +0300 Subject: ath: Map Czech to ETSI1_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. This change itself doesn't change the selected CTL of this country and is only required to stay in sync with the QCA mappings. 
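For context on this series: an entry in allCountries[] ties a country to a regdomain enum, and regDomainPairs[] maps that enum to the conformance test limits used on 2.4 GHz and 5 GHz. If the regdomain programmed in EEPROM has no entry in the pair table, initialization aborts with "Invalid EEPROM contents". The lookup sketch below shows how the two tables chain together; the struct layouts and the lookup_ctls() helper are illustrative assumptions, not the actual ath regd.c code.

/*
 * Illustrative sketch only: struct layouts and the helper are assumptions
 * for exposition, not the real ath regd.c implementation.
 */
#include <stddef.h>
#include <stdint.h>

struct country_code_to_enum_rd {
	uint16_t countryCode;	/* CTRY_* */
	uint16_t regDmnEnum;	/* e.g. ETSI1_WORLD */
	const char *isoName;	/* "CZ", "BG", ... */
};

struct reg_dmn_pair_mapping {
	uint16_t reg_domain;	/* e.g. ETSI1_WORLD */
	uint16_t reg_2ghz_ctl;	/* e.g. CTL_ETSI */
	uint16_t reg_5ghz_ctl;	/* e.g. CTL_ETSI */
};

/* Chain the two tables: country -> regdomain enum -> per-band CTLs. */
static const struct reg_dmn_pair_mapping *
lookup_ctls(uint16_t country,
	    const struct country_code_to_enum_rd *countries, size_t ncountries,
	    const struct reg_dmn_pair_mapping *pairs, size_t npairs)
{
	size_t i, j;

	for (i = 0; i < ncountries; i++) {
		if (countries[i].countryCode != country)
			continue;
		for (j = 0; j < npairs; j++)
			if (pairs[j].reg_domain == countries[i].regDmnEnum)
				return &pairs[j];
	}
	/* No pair entry for the programmed regdomain: this is the
	 * "Invalid EEPROM contents" failure the series is plugging.
	 */
	return NULL;
}

Each patch in the series either adds a missing pair entry so the inner match can succeed, or repoints a country's regdomain at a pair whose CTLs follow the QCA reference mapping.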
Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 17f7e7768395..da0f958e927e 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -331,7 +331,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_COSTA_RICA, FCC1_WORLD, "CR"}, {CTRY_CROATIA, ETSI1_WORLD, "HR"}, {CTRY_CYPRUS, ETSI1_WORLD, "CY"}, - {CTRY_CZECH, ETSI3_WORLD, "CZ"}, + {CTRY_CZECH, ETSI1_WORLD, "CZ"}, {CTRY_DENMARK, ETSI1_WORLD, "DK"}, {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO"}, {CTRY_ECUADOR, FCC1_WORLD, "EC"}, -- cgit v1.2.3 From 143a9b9bd7ee9d63f216a164c04fe6e4ee129ed5 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:12:17 +0300 Subject: ath: Map Honduras to FCC3_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index da0f958e927e..342f2625ad8a 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -348,7 +348,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_GUAM, FCC1_FCCA, "GU"}, {CTRY_GUATEMALA, FCC1_FCCA, "GT"}, {CTRY_HAITI, ETSI1_WORLD, "HT"}, - {CTRY_HONDURAS, NULL1_WORLD, "HN"}, + {CTRY_HONDURAS, FCC3_WORLD, "HN"}, {CTRY_HONG_KONG, FCC3_WORLD, "HK"}, {CTRY_HUNGARY, ETSI1_WORLD, "HU"}, {CTRY_ICELAND, ETSI1_WORLD, "IS"}, -- cgit v1.2.3 From 62ba2b22d12026a98557450753441aa9e2a9959d Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:12:32 +0300 Subject: ath: Map Isreal to ETSI3_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. 
The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 342f2625ad8a..44c7d5de900a 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -356,7 +356,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_INDONESIA, NULL1_WORLD, "ID"}, {CTRY_IRAN, APL1_WORLD, "IR"}, {CTRY_IRELAND, ETSI1_WORLD, "IE"}, - {CTRY_ISRAEL, NULL1_WORLD, "IL"}, + {CTRY_ISRAEL, ETSI3_WORLD, "IL"}, {CTRY_ITALY, ETSI1_WORLD, "IT"}, {CTRY_JAMAICA, FCC3_WORLD, "JM"}, -- cgit v1.2.3 From 64874ed2a73a4d743796ef103392606399a043cd Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:12:50 +0300 Subject: ath: Map Macedonia to ETSI1_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 44c7d5de900a..dfe0754522a6 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -433,7 +433,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_LITHUANIA, ETSI1_WORLD, "LT"}, {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU"}, {CTRY_MACAU, FCC2_WORLD, "MO"}, - {CTRY_MACEDONIA, NULL1_WORLD, "MK"}, + {CTRY_MACEDONIA, ETSI1_WORLD, "MK"}, {CTRY_MALAYSIA, APL8_WORLD, "MY"}, {CTRY_MALTA, ETSI1_WORLD, "MT"}, {CTRY_MAURITIUS, ETSI1_WORLD, "MU"}, -- cgit v1.2.3 From 823a64065adf27d94d27369bad5d2587821b0d98 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:12:56 +0300 Subject: ath: Map Malasia to FCC1_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. 
The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: ETSI -> FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index dfe0754522a6..6d24c85dbdd7 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -434,7 +434,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU"}, {CTRY_MACAU, FCC2_WORLD, "MO"}, {CTRY_MACEDONIA, ETSI1_WORLD, "MK"}, - {CTRY_MALAYSIA, APL8_WORLD, "MY"}, + {CTRY_MALAYSIA, FCC1_WORLD, "MY"}, {CTRY_MALTA, ETSI1_WORLD, "MT"}, {CTRY_MAURITIUS, ETSI1_WORLD, "MU"}, {CTRY_MEXICO, FCC1_FCCA, "MX"}, -- cgit v1.2.3 From 9bfc2bb32e01c3a759dbef007cee938390157270 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:13:05 +0300 Subject: ath: Map New Zealand to FCC3_ETSIC The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. This change itself doesn't change the selected CTL of this country and is only required to stay in sync with the QCA mappings. Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 6d24c85dbdd7..035b023422e2 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -444,7 +444,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_NEPAL, APL1_WORLD, "NP"}, {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"}, {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"}, - {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"}, + {CTRY_NEW_ZEALAND, FCC3_ETSIC, "NZ"}, {CTRY_NICARAGUA, FCC3_FCCA, "NI"}, {CTRY_NORWAY, ETSI1_WORLD, "NO"}, {CTRY_OMAN, FCC3_WORLD, "OM"}, -- cgit v1.2.3 From b5c11e4744062285acf6835c8bd123edb244a2f1 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:13:08 +0300 Subject: ath: Map Peru to APL1_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. This change itself doesn't change the selected CTL of this country and is only required to stay in sync with the QCA mappings. 
Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 035b023422e2..9ab23631d043 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -452,7 +452,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_PANAMA, FCC1_FCCA, "PA"}, {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"}, {CTRY_PARAGUAY, FCC3_WORLD, "PY"}, - {CTRY_PERU, APL1_WORLD, "PE"}, + {CTRY_PERU, FCC3_WORLD, "PE"}, {CTRY_PHILIPPINES, APL1_WORLD, "PH"}, {CTRY_POLAND, ETSI1_WORLD, "PL"}, {CTRY_PORTUGAL, ETSI1_WORLD, "PT"}, -- cgit v1.2.3 From c454d4c258444522b1c07f59e989795d3c72cbac Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:13:15 +0300 Subject: ath: Map Philippines to FCC3_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. This change itself doesn't change the selected CTL of this country and is only required to stay in sync with the QCA mappings. Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 9ab23631d043..a10a1b8cb55b 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -453,7 +453,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"}, {CTRY_PARAGUAY, FCC3_WORLD, "PY"}, {CTRY_PERU, FCC3_WORLD, "PE"}, - {CTRY_PHILIPPINES, APL1_WORLD, "PH"}, + {CTRY_PHILIPPINES, FCC3_WORLD, "PH"}, {CTRY_POLAND, ETSI1_WORLD, "PL"}, {CTRY_PORTUGAL, ETSI1_WORLD, "PT"}, {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"}, -- cgit v1.2.3 From 12f415566fb4487bd5ce6273ed15d1ab6340339c Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:13:22 +0300 Subject: ath: Map Romania to ETSI1_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. 
The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index a10a1b8cb55b..3afce206e049 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -458,7 +458,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_PORTUGAL, ETSI1_WORLD, "PT"}, {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"}, {CTRY_QATAR, APL1_WORLD, "QA"}, - {CTRY_ROMANIA, NULL1_WORLD, "RO"}, + {CTRY_ROMANIA, ETSI1_WORLD, "RO"}, {CTRY_RUSSIA, NULL1_WORLD, "RU"}, {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, {CTRY_SERBIA, ETSI1_WORLD, "RS"}, -- cgit v1.2.3 From b7e015132d85f511cfa242a6aa04644fea55fdbb Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:13:30 +0300 Subject: ath: Map Russia to ETSI8_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 3afce206e049..4a70c52f2940 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -459,7 +459,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"}, {CTRY_QATAR, APL1_WORLD, "QA"}, {CTRY_ROMANIA, ETSI1_WORLD, "RO"}, - {CTRY_RUSSIA, NULL1_WORLD, "RU"}, + {CTRY_RUSSIA, ETSI8_WORLD, "RU"}, {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, {CTRY_SERBIA, ETSI1_WORLD, "RS"}, {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, -- cgit v1.2.3 From 054e077880027a469e44444dc9b18d4f2561dbf7 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:13:43 +0300 Subject: ath: Map Singapore to FCC3_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. 
The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: ETSI -> FCC Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 4a70c52f2940..470fe45be3cd 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -463,7 +463,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, {CTRY_SERBIA, ETSI1_WORLD, "RS"}, {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, - {CTRY_SINGAPORE, APL6_WORLD, "SG"}, + {CTRY_SINGAPORE, FCC3_WORLD, "SG"}, {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"}, {CTRY_SLOVENIA, ETSI1_WORLD, "SI"}, {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA"}, -- cgit v1.2.3 From 4e78075c48212aa5e17cf0d35ccc2cd077e6f09a Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:13:52 +0300 Subject: ath: Map Ukraine to ETSI9_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 470fe45be3cd..44826f862cf7 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -479,7 +479,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, {CTRY_TURKEY, ETSI3_WORLD, "TR"}, {CTRY_UGANDA, FCC3_WORLD, "UG"}, - {CTRY_UKRAINE, NULL1_WORLD, "UA"}, + {CTRY_UKRAINE, ETSI9_WORLD, "UA"}, {CTRY_UAE, NULL1_WORLD, "AE"}, {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, {CTRY_UNITED_STATES, FCC3_FCCA, "US"}, -- cgit v1.2.3 From f1abff21acc5c21bfba472c74d76f4430d06bdd3 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 23 May 2018 11:14:11 +0300 Subject: ath: Map Zimbabwe to ETSI1_WORLD The regdomain code is used to select the correct the correct conformance test limits (CTL) for a country. If the regdomain code isn't correctly mapped to the actual CTL entries in EEPROM then it could happen that the device violates the regulations. But it can also happen that the device is then not able to be used with its full txpower on all rates. 
The CTL mappings for this regdomain code were now changed to: * 2.4GHz: ETSI * 5GHz: NO_CTL -> ETSI Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/regd_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 44826f862cf7..4021e37a225a 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -492,7 +492,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_VENEZUELA, APL2_ETSIC, "VE"}, {CTRY_VIET_NAM, NULL1_WORLD, "VN"}, {CTRY_YEMEN, NULL1_WORLD, "YE"}, - {CTRY_ZIMBABWE, NULL1_WORLD, "ZW"}, + {CTRY_ZIMBABWE, ETSI1_WORLD, "ZW"}, }; #endif -- cgit v1.2.3 From cf3c0ae6a32b5c3bb19b07d48dff646fc3896e51 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 23 May 2018 11:14:17 +0300 Subject: ath10k: remove useless test before clk_disable_unprepare clk_disable_unprepare() already checks that the clock pointer is valid. No need to test it before calling it. Signed-off-by: YueHaibing Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 35d10490f6c3..fa39ffffd34d 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -180,14 +180,11 @@ static void ath10k_ahb_clock_disable(struct ath10k *ar) { struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); - if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk)) - clk_disable_unprepare(ar_ahb->cmd_clk); + clk_disable_unprepare(ar_ahb->cmd_clk); - if (!IS_ERR_OR_NULL(ar_ahb->ref_clk)) - clk_disable_unprepare(ar_ahb->ref_clk); + clk_disable_unprepare(ar_ahb->ref_clk); - if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk)) - clk_disable_unprepare(ar_ahb->rtc_clk); + clk_disable_unprepare(ar_ahb->rtc_clk); } static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar) -- cgit v1.2.3 From 9a81cc23dfef24e254d3ff15c52bf46f0736f4c2 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:14:22 +0300 Subject: wcn36xx: fix buffer commit logic on TX path When wcn36xx_dxe_tx_frame() is entered while the device is still processing the queue asynchronously, we are racing against the firmware code with updates to the buffer descriptors. Presumably, the firmware scans the ring buffer that holds the descriptors for a valid control descriptor, and then assumes that the next descriptor contains the payload. If, however, the control descriptor is marked valid, but the payload descriptor isn't, the packet is not sent out. Another issue with the current code is that it lacks memory barriers before descriptors are marked valid. This is important because the CPU may reorder writes to memory, even if it is allocated as a coherent DMA area, and hence the device may see incompletely written data. To fix this, the code in wcn36xx_dxe_tx_frame() was restructured a bit so that the payload descriptor is made valid before the control descriptor. Memory barriers are added to ensure coherency of shared memory areas.
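The publication order described above is a general pattern for descriptor rings kept in coherent DMA memory: fill every field first, then flag the payload descriptor valid, then flag the control descriptor the hardware actually scans, with a write barrier before each flag. The sketch below condenses that order; publish_frame() and struct desc are simplified stand-ins assumed for illustration, not the real wcn36xx structures (kernel context assumed for wmb(), u32 and dma_addr_t).

/* Sketch of the publish order enforced by the patch below; simplified,
 * hypothetical names rather than the literal wcn36xx code.
 */
struct desc {
	u32 addr;
	u32 len;
	u32 ctrl;	/* VALID bit read by the DMA engine */
};

static void publish_frame(struct desc *bd_desc, struct desc *skb_desc,
			  dma_addr_t bd_phys, dma_addr_t skb_phys,
			  u32 bd_len, u32 skb_len, u32 valid)
{
	/* 1. Fill both descriptors completely, VALID bits still clear. */
	bd_desc->addr = bd_phys;
	bd_desc->len = bd_len;
	skb_desc->addr = skb_phys;
	skb_desc->len = skb_len;

	/* 2. Make the payload words visible before any VALID bit is set. */
	wmb();

	/* 3. Validate the payload (skb) descriptor first ... */
	skb_desc->ctrl = valid;
	wmb();

	/* 4. ... and only then the control (BD) descriptor the engine scans. */
	bd_desc->ctrl = valid;
}

If the engine ever observes a valid control descriptor, the payload descriptor behind it is therefore guaranteed to be valid and fully written as well, which is exactly the property the race described above was violating.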
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 75 +++++++++++++++++----------------- 1 file changed, 38 insertions(+), 37 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index bd2b946a65c9..3d1cf7dd1f8e 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -642,8 +642,8 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, struct sk_buff *skb, bool is_low) { - struct wcn36xx_dxe_ctl *ctl = NULL; - struct wcn36xx_dxe_desc *desc = NULL; + struct wcn36xx_dxe_desc *desc_bd, *desc_skb; + struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb; struct wcn36xx_dxe_ch *ch = NULL; unsigned long flags; int ret; @@ -651,74 +651,75 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch; spin_lock_irqsave(&ch->lock, flags); - ctl = ch->head_blk_ctl; + ctl_bd = ch->head_blk_ctl; + ctl_skb = ctl_bd->next; /* * If skb is not null that means that we reached the tail of the ring * hence ring is full. Stop queues to let mac80211 back off until ring * has an empty slot again. */ - if (NULL != ctl->next->skb) { + if (NULL != ctl_skb->skb) { ieee80211_stop_queues(wcn->hw); wcn->queues_stopped = true; spin_unlock_irqrestore(&ch->lock, flags); return -EBUSY; } - ctl->skb = NULL; - desc = ctl->desc; + if (unlikely(ctl_skb->bd_cpu_addr)) { + wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n"); + ret = -EINVAL; + goto unlock; + } + + desc_bd = ctl_bd->desc; + desc_skb = ctl_skb->desc; + + ctl_bd->skb = NULL; /* write buffer descriptor */ - memcpy(ctl->bd_cpu_addr, bd, sizeof(*bd)); + memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd)); /* Set source address of the BD we send */ - desc->src_addr_l = ctl->bd_phy_addr; - - desc->dst_addr_l = ch->dxe_wq; - desc->fr_len = sizeof(struct wcn36xx_tx_bd); - desc->ctrl = ch->ctrl_bd; + desc_bd->src_addr_l = ctl_bd->bd_phy_addr; + desc_bd->dst_addr_l = ch->dxe_wq; + desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd); wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n"); wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ", - (char *)desc, sizeof(*desc)); + (char *)desc_bd, sizeof(*desc_bd)); wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, - "BD >>> ", (char *)ctl->bd_cpu_addr, + "BD >>> ", (char *)ctl_bd->bd_cpu_addr, sizeof(struct wcn36xx_tx_bd)); - /* Set source address of the SKB we send */ - ctl = ctl->next; - desc = ctl->desc; - if (ctl->bd_cpu_addr) { - wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n"); - ret = -EINVAL; - goto unlock; - } - - desc->src_addr_l = dma_map_single(wcn->dev, - skb->data, - skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(wcn->dev, desc->src_addr_l)) { + desc_skb->src_addr_l = dma_map_single(wcn->dev, + skb->data, + skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) { dev_err(wcn->dev, "unable to DMA map src_addr_l\n"); ret = -ENOMEM; goto unlock; } - ctl->skb = skb; - desc->dst_addr_l = ch->dxe_wq; - desc->fr_len = ctl->skb->len; - - /* set dxe descriptor to VALID */ - desc->ctrl = ch->ctrl_skb; + ctl_skb->skb = skb; + desc_skb->dst_addr_l = ch->dxe_wq; + desc_skb->fr_len = ctl_skb->skb->len; wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ", - (char *)desc, sizeof(*desc)); + (char *)desc_skb, sizeof(*desc_skb)); wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ", - (char *)ctl->skb->data, ctl->skb->len); + (char *)ctl_skb->skb->data, ctl_skb->skb->len); /* Move the head of the ring to the next empty descriptor */ - 
ch->head_blk_ctl = ctl->next; + ch->head_blk_ctl = ctl_skb->next; + + /* Commit all previous writes and set descriptors to VALID */ + wmb(); + desc_skb->ctrl = ch->ctrl_skb; + wmb(); + desc_bd->ctrl = ch->ctrl_bd; /* * When connected and trying to send data frame chip can be in sleep -- cgit v1.2.3 From 57e06e0e2a862bd40df47c7ad752fcad1d04c259 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:14:26 +0300 Subject: wcn36xx: set DMA mask explicitly The device takes 32-bit addresses only, so inform the DMA API about it. This is the default on msm8016, so that doesn't change anything, but it's best practice to be explicit. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index e3b91b3b38ef..e6330ad372b3 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1309,6 +1309,12 @@ static int wcn36xx_probe(struct platform_device *pdev) mutex_init(&wcn->hal_mutex); mutex_init(&wcn->scan_lock); + ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32)); + if (ret < 0) { + wcn36xx_err("failed to set DMA mask: %d\n", ret); + goto out_wq; + } + INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker); wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw); -- cgit v1.2.3 From ba437e72378c91a2fd6766014cc02fa98b61f2dc Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:14:30 +0300 Subject: wcn36xx: don't disable RX IRQ from handler There's no need to disable the IRQ from inside its handler. Instead just grab the spinlock of the channel that is being processed. 
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 3d1cf7dd1f8e..d6dd47b211ba 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -476,9 +476,8 @@ static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev) { struct wcn36xx *wcn = (struct wcn36xx *)dev; - disable_irq_nosync(wcn->rx_irq); wcn36xx_dxe_rx_frame(wcn); - enable_irq(wcn->rx_irq); + return IRQ_HANDLED; } @@ -514,8 +513,8 @@ out_err: static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) { - struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl; - struct wcn36xx_dxe_desc *dxe = ctl->desc; + struct wcn36xx_dxe_desc *dxe; + struct wcn36xx_dxe_ctl *ctl; dma_addr_t dma_addr; struct sk_buff *skb; int ret = 0, int_mask; @@ -529,6 +528,11 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, int_mask = WCN36XX_DXE_INT_CH3_MASK; } + spin_lock(&ch->lock); + + ctl = ch->head_blk_ctl; + dxe = ctl->desc; + while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) { skb = ctl->skb; dma_addr = dxe->dst_addr_l; @@ -549,6 +553,9 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask); ch->head_blk_ctl = ctl; + + spin_unlock(&ch->lock); + return 0; } -- cgit v1.2.3 From edd23ab403cf092a20ea185770f197f502ac32f0 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:14:35 +0300 Subject: wcn36xx: clear all masks in RX interrupt Like on the TX side, check for the interrupt reason when the RX interrupt is latched and clear the ERR, DONE and ED masks. This seems to help with connection timeouts and network stream starvatations. And FWIW, the downstream driver does the same thing. Note that in analogy to the TX side, WCN36XX_DXE_0_INT_CLR should be set to WCN36XX_INT_MASK_CHAN_RX_{L,H} rather than WCN36XX_DXE_INT_CH{1,3}_MASK. It did the right thing however, as the defines happen to have identical values. Also, instead of determining register addresses and values inside wcn36xx_rx_handle_packets(), pass them as arguments. 
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 62 ++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 22 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index d6dd47b211ba..d11c9c536627 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -511,23 +511,40 @@ out_err: } static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, - struct wcn36xx_dxe_ch *ch) + struct wcn36xx_dxe_ch *ch, + u32 ctrl, + u32 en_mask, + u32 int_mask, + u32 status_reg) { struct wcn36xx_dxe_desc *dxe; struct wcn36xx_dxe_ctl *ctl; dma_addr_t dma_addr; struct sk_buff *skb; - int ret = 0, int_mask; - u32 value; + u32 int_reason; + int ret; - if (ch->ch_type == WCN36XX_DXE_CH_RX_L) { - value = WCN36XX_DXE_CTRL_RX_L; - int_mask = WCN36XX_DXE_INT_CH1_MASK; - } else { - value = WCN36XX_DXE_CTRL_RX_H; - int_mask = WCN36XX_DXE_INT_CH3_MASK; + wcn36xx_dxe_read_register(wcn, status_reg, &int_reason); + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask); + + if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) { + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_ERR_CLR, + int_mask); + + wcn36xx_err("DXE IRQ reported error on RX channel\n"); } + if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_DONE_CLR, + int_mask); + + if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_ED_CLR, + int_mask); + spin_lock(&ch->lock); ctl = ch->head_blk_ctl; @@ -546,11 +563,11 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, wcn36xx_rx_skb(wcn, skb); } /* else keep old skb not submitted and use it for rx DMA */ - dxe->ctrl = value; + dxe->ctrl = ctrl; ctl = ctl->next; dxe = ctl->desc; } - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask); + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask); ch->head_blk_ctl = ctl; @@ -566,19 +583,20 @@ void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn) wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src); /* RX_LOW_PRI */ - if (int_src & WCN36XX_DXE_INT_CH1_MASK) { - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, - WCN36XX_DXE_INT_CH1_MASK); - wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch)); - } + if (int_src & WCN36XX_DXE_INT_CH1_MASK) + wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch, + WCN36XX_DXE_CTRL_RX_L, + WCN36XX_DXE_INT_CH1_MASK, + WCN36XX_INT_MASK_CHAN_RX_L, + WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L); /* RX_HIGH_PRI */ - if (int_src & WCN36XX_DXE_INT_CH3_MASK) { - /* Clean up all the INT within this channel */ - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, - WCN36XX_DXE_INT_CH3_MASK); - wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch)); - } + if (int_src & WCN36XX_DXE_INT_CH3_MASK) + wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch, + WCN36XX_DXE_CTRL_RX_H, + WCN36XX_DXE_INT_CH3_MASK, + WCN36XX_INT_MASK_CHAN_RX_H, + WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H); if (!int_src) wcn36xx_warn("No DXE interrupt pending\n"); -- cgit v1.2.3 From ce1d4be82b1009374f7bea0229fb6758cb1afb84 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:14:40 +0300 Subject: wcn36xx: only handle packets when ED or DONE bit is set On RX and TX interrupts, check for the WCN36XX_CH_STAT_INT_ED_MASK or WCN36XX_CH_STAT_INT_DONE_MASK in the interrupt reason register, and only handle packets when it is set. 
This way, reap_tx_dxes() is only invoked when needed. This brings the dequeing logic in line with what the prima downstream driver is doing. While at it, also log the interrupt reason. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index d11c9c536627..8c64e05ca3b7 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -430,8 +430,12 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) WCN36XX_INT_MASK_CHAN_TX_H); } - wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n"); - reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch); + wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n", + int_reason); + + if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK | + WCN36XX_CH_STAT_INT_ED_MASK)) + reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch); } if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) { @@ -465,8 +469,12 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) WCN36XX_INT_MASK_CHAN_TX_L); } - wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n"); - reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch); + wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n", + int_reason); + + if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK | + WCN36XX_CH_STAT_INT_ED_MASK)) + reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch); } return IRQ_HANDLED; @@ -545,6 +553,10 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, WCN36XX_DXE_0_INT_ED_CLR, int_mask); + if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK | + WCN36XX_CH_STAT_INT_ED_MASK))) + return 0; + spin_lock(&ch->lock); ctl = ch->head_blk_ctl; -- cgit v1.2.3 From 18c7ed138824119f2ef9c9019e222e3e750b42b8 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:14:44 +0300 Subject: wcn36xx: consider CTRL_EOP bit when looking for valid descriptors In reap_tx_dxes(), when we iterate over the linked descriptors, only consider such valid that have WCN36xx_DXE_CTRL_EOP set. This is what the prima downstream driver is doing as well. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 8c64e05ca3b7..06cfe8d311f3 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -370,7 +370,9 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) do { if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD) break; - if (ctl->skb) { + + if (ctl->skb && + READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) { dma_unmap_single(wcn->dev, ctl->desc->src_addr_l, ctl->skb->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(ctl->skb); -- cgit v1.2.3 From 2a46c829a9266bf2a2be1a3d25dbac5fcd4eb9c1 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:14:50 +0300 Subject: wcn36xx: set PREASSOC and IDLE stated when BSS info changes When a BSSID is joined, set the link status to 'preassoc', and set it to 'idle' when the BSS is deleted. This is what the downstream driver is doing, and it seems to improve the reliability during connect/disconnect stress tests. 
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index e6330ad372b3..662e50540b07 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -798,6 +798,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, if (!is_zero_ether_addr(bss_conf->bssid)) { vif_priv->is_joining = true; vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; + wcn36xx_smd_set_link_st(wcn, bss_conf->bssid, vif->addr, + WCN36XX_HAL_LINK_PREASSOC_STATE); wcn36xx_smd_join(wcn, bss_conf->bssid, vif->addr, WCN36XX_HW_CHANNEL(wcn)); wcn36xx_smd_config_bss(wcn, vif, NULL, @@ -805,6 +807,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, } else { vif_priv->is_joining = false; wcn36xx_smd_delete_bss(wcn, vif); + wcn36xx_smd_set_link_st(wcn, bss_conf->bssid, vif->addr, + WCN36XX_HAL_LINK_IDLE_STATE); vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE; } } -- cgit v1.2.3 From 773f9a28bcdafd6cb77fb1afc545c62746d53990 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:14:55 +0300 Subject: wcn36xx: drain pending indicator messages on shutdown When the interface is shut down, wcn36xx_smd_close() potentially races against the queue worker. Make sure to cancel the work, and then free all the remnants in hal_ind_queue manually. This is again just a theoretical issue, not something that was triggered in the wild. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index ea74f2b92df5..0a505b5e038b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -2513,5 +2513,11 @@ out: void wcn36xx_smd_close(struct wcn36xx *wcn) { + struct wcn36xx_hal_ind_msg *msg, *tmp; + + cancel_work_sync(&wcn->hal_ind_work); destroy_workqueue(wcn->hal_ind_wq); + + list_for_each_entry_safe(msg, tmp, &wcn->hal_ind_queue, list) + kfree(msg); } -- cgit v1.2.3 From a50c6c8412f7114cbb0514dbea7a5a9a3d317479 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:14:59 +0300 Subject: wcn36xx: simplify wcn36xx_smd_open() Drop the extra warning about failed allocations, both the core and the only caller of this function will warn loud enough in such cases. 
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 0a505b5e038b..43c8aa79fad4 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -2494,21 +2494,15 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) } int wcn36xx_smd_open(struct wcn36xx *wcn) { - int ret = 0; wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind"); - if (!wcn->hal_ind_wq) { - wcn36xx_err("failed to allocate wq\n"); - ret = -ENOMEM; - goto out; - } + if (!wcn->hal_ind_wq) + return -ENOMEM; + INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work); INIT_LIST_HEAD(&wcn->hal_ind_queue); spin_lock_init(&wcn->hal_ind_lock); return 0; - -out: - return ret; } void wcn36xx_smd_close(struct wcn36xx *wcn) -- cgit v1.2.3 From ffbc9197b4721634dc6c0fefa9b31e565fa89cee Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 23 May 2018 11:15:03 +0300 Subject: wcn36xx: improve debug and error messages for SMD Add a missing newline in wcn36xx_smd_send_and_wait() and also log the command request and response type that was processed. Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/smd.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 43c8aa79fad4..b855a58d5aac 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -252,23 +252,29 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len) { int ret = 0; unsigned long start; + struct wcn36xx_hal_msg_header *hdr = + (struct wcn36xx_hal_msg_header *)wcn->hal_buf; + u16 req_type = hdr->msg_type; + wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len); init_completion(&wcn->hal_rsp_compl); start = jiffies; ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, len); if (ret) { - wcn36xx_err("HAL TX failed\n"); + wcn36xx_err("HAL TX failed for req %d\n", req_type); goto out; } if (wait_for_completion_timeout(&wcn->hal_rsp_compl, msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) { - wcn36xx_err("Timeout! No SMD response in %dms\n", - HAL_MSG_TIMEOUT); + wcn36xx_err("Timeout! No SMD response to req %d in %dms\n", + req_type, HAL_MSG_TIMEOUT); ret = -ETIME; goto out; } - wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms", + wcn36xx_dbg(WCN36XX_DBG_SMD, + "SMD command (req %d, rsp %d) completed in %dms\n", + req_type, hdr->msg_type, jiffies_to_msecs(jiffies - start)); out: return ret; -- cgit v1.2.3 From f40105e6747892e8edab94020567c158c9bec0df Mon Sep 17 00:00:00 2001 From: Sriram R Date: Tue, 15 May 2018 14:39:48 +0530 Subject: ath: add support to get the detected radar specifications This enables ath10k/ath9k drivers to collect the specifications of the radar type once it is detected by the dfs pattern detector unit. Usage of the collected info is specific to driver implementation. For example, collected radar info could be used by the host driver to send to co-processors for additional processing/validation. Note: 'radar_detector_specs' data containing the specifications of different radar types which was private within dfs_pattern_detector/ dfs_pri_detector is now shared with drivers as well for making use of this information. 
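With the extended add_pulse() signature, a driver that wants the matched pattern's parameters can pass a radar_detector_specs on the stack instead of NULL. The caller below is a hypothetical sketch of that usage — example_process_pulse() and the specific fields printed (pri_min/pri_max/width_min/width_max) are assumptions based on the detector tables; the real consumer is the ath10k DFS host-confirmation change that follows.

/* Hypothetical caller sketch: how a driver could consume the specs the
 * detector now exposes.  The printed field names are assumed; adjust to
 * the actual radar_detector_specs definition.
 */
#include "dfs_pattern_detector.h"

static void example_process_pulse(struct dfs_pattern_detector *dpd,
				  struct pulse_event *pe)
{
	struct radar_detector_specs rs;

	if (!dpd->add_pulse(dpd, pe, &rs))
		return;		/* no complete radar pattern yet */

	/* A radar pattern matched; rs now describes the matched type and
	 * could be forwarded to a co-processor for further validation.
	 */
	pr_debug("radar: pri %u-%u, width %u-%u\n",
		 rs.pri_min, rs.pri_max, rs.width_min, rs.width_max);
}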
Signed-off-by: Sriram R Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 2 +- drivers/net/wireless/ath/ath9k/dfs.c | 2 +- drivers/net/wireless/ath/dfs_pattern_detector.c | 5 ++++- drivers/net/wireless/ath/dfs_pattern_detector.h | 3 ++- drivers/net/wireless/ath/dfs_pri_detector.h | 3 ++- 5 files changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 46fb96ee5852..1bccac002e97 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -3757,7 +3757,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, ATH10K_DFS_STAT_INC(ar, pulses_detected); - if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { + if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, NULL)) { ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs no pulse pattern detected, yet\n"); return; diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c index c8844f55574c..acb9602aa464 100644 --- a/drivers/net/wireless/ath/ath9k/dfs.c +++ b/drivers/net/wireless/ath/ath9k/dfs.c @@ -277,7 +277,7 @@ ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe) DFS_STAT_INC(sc, pulses_processed); if (pd == NULL) return; - if (!pd->add_pulse(pd, pe)) + if (!pd->add_pulse(pd, pe, NULL)) return; DFS_STAT_INC(sc, radar_detected); ieee80211_radar_detected(sc->hw); diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 448b83eea810..d52b31b45df7 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -268,7 +268,8 @@ static void dpd_exit(struct dfs_pattern_detector *dpd) } static bool -dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event) +dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event, + struct radar_detector_specs *rs) { u32 i; struct channel_detector *cd; @@ -294,6 +295,8 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event) struct pri_detector *pd = cd->detectors[i]; struct pri_sequence *ps = pd->add_pulse(pd, event); if (ps != NULL) { + if (rs != NULL) + memcpy(rs, pd->rs, sizeof(*rs)); ath_dbg(dpd->common, DFS, "DFS: radar found on freq=%d: id=%d, pri=%d, " "count=%d, count_false=%d\n", diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h index 92be3530e9b5..18db6f4f3568 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.h +++ b/drivers/net/wireless/ath/dfs_pattern_detector.h @@ -97,7 +97,8 @@ struct dfs_pattern_detector { bool (*set_dfs_domain)(struct dfs_pattern_detector *dpd, enum nl80211_dfs_regions region); bool (*add_pulse)(struct dfs_pattern_detector *dpd, - struct pulse_event *pe); + struct pulse_event *pe, + struct radar_detector_specs *rs); struct ath_dfs_pool_stats (*get_stats)(struct dfs_pattern_detector *dpd); enum nl80211_dfs_regions region; diff --git a/drivers/net/wireless/ath/dfs_pri_detector.h b/drivers/net/wireless/ath/dfs_pri_detector.h index 79f0fff4d1e6..86339f2b4d3a 100644 --- a/drivers/net/wireless/ath/dfs_pri_detector.h +++ b/drivers/net/wireless/ath/dfs_pri_detector.h @@ -62,8 +62,9 @@ struct pri_detector { (*add_pulse)(struct pri_detector *de, struct pulse_event *e); void (*reset) (struct pri_detector *de, u64 ts); -/* private: internal use only */ const struct radar_detector_specs *rs; + +/* private: internal use only */ u64 last_ts; 
struct list_head sequences; struct list_head pulses; -- cgit v1.2.3 From 6f6eb1bcbeff48c875617b800f00f1c5d1b12290 Mon Sep 17 00:00:00 2001 From: Sriram R Date: Tue, 15 May 2018 14:39:49 +0530 Subject: ath10k: DFS Host Confirmation In the 10.4-3.6 firmware branch there's a new DFS Host confirmation feature which is advertised using WMI_SERVICE_HOST_DFS_CHECK_SUPPORT flag. This new features enables the ath10k host to send information to the firmware on the specifications of detected radar type. This allows the firmware to validate if the host's radar pattern detector unit is operational and check if the radar information shared by host matches the radar pulses sent as phy error events from firmware. If the check fails the firmware won't allow use of DFS channels on AP mode when using FCC regulatory region. Hence this patch is mandatory when using a firmware from 10.4-3.6 branch. Else, DFS channels on FCC regions cannot be used. Supported Chipsets : QCA9984/QCA9888/QCA4019 Firmware Version : 10.4-3.6-00104 Signed-off-by: Sriram R Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 21 ++++ drivers/net/wireless/ath/ath10k/mac.c | 12 ++ drivers/net/wireless/ath/ath10k/wmi-ops.h | 32 +++++ drivers/net/wireless/ath/ath10k/wmi.c | 186 ++++++++++++++++++++++++++++-- drivers/net/wireless/ath/ath10k/wmi.h | 32 +++++ 5 files changed, 273 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index eb7ff1116442..951dbdd1c9eb 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -176,6 +176,7 @@ struct ath10k_wmi { struct completion service_ready; struct completion unified_ready; struct completion barrier; + struct completion radar_confirm; wait_queue_head_t tx_credits_wq; DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX); struct wmi_cmd_map *cmd; @@ -378,6 +379,21 @@ struct ath10k_dfs_stats { u32 radar_detected; }; +enum ath10k_radar_confirmation_state { + ATH10K_RADAR_CONFIRMATION_IDLE = 0, + ATH10K_RADAR_CONFIRMATION_INPROGRESS, + ATH10K_RADAR_CONFIRMATION_STOPPED, +}; + +struct ath10k_radar_found_info { + u32 pri_min; + u32 pri_max; + u32 width_min; + u32 width_max; + u32 sidx_min; + u32 sidx_max; +}; + #define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */ struct ath10k_peer { @@ -1110,6 +1126,11 @@ struct ath10k { u32 sta_tid_stats_mask; + /* protected by data_lock */ + enum ath10k_radar_confirmation_state radar_conf_state; + struct ath10k_radar_found_info last_radar_info; + struct work_struct radar_confirmation_work; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index e3b67ffdab72..e9c2fb318c03 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3217,6 +3217,15 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); } +static void ath10k_stop_radar_confirmation(struct ath10k *ar) +{ + spin_lock_bh(&ar->data_lock); + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED; + spin_unlock_bh(&ar->data_lock); + + cancel_work_sync(&ar->radar_confirmation_work); +} + /***************/ /* TX handlers */ /***************/ @@ -4333,6 +4342,7 @@ void ath10k_halt(struct ath10k *ar) ath10k_scan_finish(ar); ath10k_peer_cleanup_all(ar); + ath10k_stop_radar_confirmation(ar); ath10k_core_stop(ar); ath10k_hif_power_down(ar); @@ -4758,6 +4768,8 @@ static int 
ath10k_start(struct ieee80211_hw *hw) ath10k_spectral_start(ar); ath10k_thermal_set_throttling(ar); + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; + mutex_unlock(&ar->conf_mutex); return 0; diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index e37d16b31afe..5ecce04005d2 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -55,6 +55,8 @@ struct wmi_ops { struct wmi_wow_ev_arg *arg); int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb, struct wmi_echo_ev_arg *arg); + int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_dfs_status_ev_arg *arg); int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb, struct wmi_svc_avail_ev_arg *arg); @@ -188,6 +190,9 @@ struct wmi_ops { const struct wmi_tdls_peer_update_cmd_arg *arg, const struct wmi_tdls_peer_capab_arg *cap, const struct wmi_channel_arg *chan); + struct sk_buff *(*gen_radar_found) + (struct ath10k *ar, + const struct ath10k_radar_found_info *arg); struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable); struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar, u32 param); @@ -395,6 +400,16 @@ ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb, return ar->wmi.ops->pull_echo_ev(ar, skb, arg); } +static inline int +ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb, + struct wmi_dfs_status_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_dfs_status_ev) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg); +} + static inline enum wmi_txbf_conf ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) { @@ -1511,4 +1526,21 @@ ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param) ar->wmi.cmd->pdev_get_tpc_table_cmdid); } +static inline int +ath10k_wmi_report_radar_found(struct ath10k *ar, + const struct ath10k_radar_found_info *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_radar_found) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_radar_found(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->radar_found_cmdid); +} + #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 1bccac002e97..f97ab795cf2e 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -34,6 +34,7 @@ #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ) +#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6) /* MAIN WMI cmd track */ static struct wmi_cmd_map wmi_cmd_map = { @@ -199,6 +200,7 @@ static struct wmi_cmd_map wmi_cmd_map = { .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.X WMI cmd track */ @@ -367,6 +369,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = { .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.2.4 WMI cmd track */ @@ -535,6 +538,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = { .pdev_bss_chan_info_request_cmdid = WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.4 WMI cmd track */ @@ -745,6 +749,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = { 
.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID, .tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID, .tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, + .radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID, }; /* MAIN WMI VDEV param map */ @@ -1490,6 +1495,7 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = { .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { @@ -3683,6 +3689,68 @@ void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); } +static void ath10k_radar_detected(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n"); + ATH10K_DFS_STAT_INC(ar, radar_detected); + + /* Control radar events reporting in debugfs file + * dfs_block_radar_events + */ + if (ar->dfs_block_radar_events) + ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); + else + ieee80211_radar_detected(ar->hw); +} + +static void ath10k_radar_confirmation_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, + radar_confirmation_work); + struct ath10k_radar_found_info radar_info; + int ret, time_left; + + reinit_completion(&ar->wmi.radar_confirm); + + spin_lock_bh(&ar->data_lock); + memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info)); + spin_unlock_bh(&ar->data_lock); + + ret = ath10k_wmi_report_radar_found(ar, &radar_info); + if (ret) { + ath10k_warn(ar, "failed to send radar found %d\n", ret); + goto wait_complete; + } + + time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm, + ATH10K_WMI_DFS_CONF_TIMEOUT_HZ); + if (time_left) { + /* DFS Confirmation status event received and + * necessary action completed. + */ + goto wait_complete; + } else { + /* DFS Confirmation event not received from FW.Considering this + * as real radar. + */ + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "dfs confirmation not received from fw, considering as radar\n"); + goto radar_detected; + } + +radar_detected: + ath10k_radar_detected(ar); + + /* Reset state to allow sending confirmation on consecutive radar + * detections, unless radar confirmation is disabled/stopped. 
+ */ +wait_complete: + spin_lock_bh(&ar->data_lock); + if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED) + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; + spin_unlock_bh(&ar->data_lock); +} + static void ath10k_dfs_radar_report(struct ath10k *ar, struct wmi_phyerr_ev_arg *phyerr, const struct phyerr_radar_report *rr, @@ -3691,8 +3759,10 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, u32 reg0, reg1, tsf32l; struct ieee80211_channel *ch; struct pulse_event pe; + struct radar_detector_specs rs; u64 tsf64; u8 rssi, width; + struct ath10k_radar_found_info *radar_info; reg0 = __le32_to_cpu(rr->reg0); reg1 = __le32_to_cpu(rr->reg1); @@ -3757,25 +3827,46 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, ATH10K_DFS_STAT_INC(ar, pulses_detected); - if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, NULL)) { + if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) { ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs no pulse pattern detected, yet\n"); return; } -radar_detected: - ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n"); - ATH10K_DFS_STAT_INC(ar, radar_detected); + if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) && + ar->dfs_detector->region == NL80211_DFS_FCC) { + /* Consecutive radar indications need not be + * sent to the firmware until we get confirmation + * for the previous detected radar. + */ + spin_lock_bh(&ar->data_lock); + if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) { + spin_unlock_bh(&ar->data_lock); + return; + } + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS; + radar_info = &ar->last_radar_info; - /* Control radar events reporting in debugfs file - * dfs_block_radar_events - */ - if (ar->dfs_block_radar_events) { - ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); + radar_info->pri_min = rs.pri_min; + radar_info->pri_max = rs.pri_max; + radar_info->width_min = rs.width_min; + radar_info->width_max = rs.width_max; + /*TODO Find sidx_min and sidx_max */ + radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX); + radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX); + + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n", + radar_info->pri_min, radar_info->pri_max, + radar_info->width_min, radar_info->width_max, + radar_info->sidx_min, radar_info->sidx_max); + ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work); + spin_unlock_bh(&ar->data_lock); return; } - ieee80211_radar_detected(ar->hw); +radar_detected: + ath10k_radar_detected(ar); } static int ath10k_dfs_fft_report(struct ath10k *ar, @@ -4125,6 +4216,47 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) } } +static int +ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_dfs_status_ev_arg *arg) +{ + struct wmi_dfs_status_ev_arg *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + arg->status = ev->status; + + return 0; +} + +static void +ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_dfs_status_ev_arg status_arg = {}; + int ret; + + ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg); + + if (ret) { + ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "dfs status event received from fw: %d\n", + status_arg.status); + + /* Even in case of radar detection failure we follow the same + * 
behaviour as if radar is detected i.e to switch to a different + * channel. + */ + if (status_arg.status == WMI_HW_RADAR_DETECTED || + status_arg.status == WMI_RADAR_DETECTION_FAIL) + ath10k_radar_detected(ar); + complete(&ar->wmi.radar_confirm); +} + void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) { struct wmi_roam_ev_arg arg = {}; @@ -5876,6 +6008,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_4_PDEV_TPC_TABLE_EVENTID: ath10k_wmi_event_tpc_final_table(ar, skb); break; + case WMI_10_4_DFS_STATUS_CHECK_EVENTID: + ath10k_wmi_event_dfs_status_check(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -8461,6 +8596,32 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar, return skb; } +static struct sk_buff * +ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar, + const struct ath10k_radar_found_info *arg) +{ + struct wmi_radar_found_info *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_radar_found_info *)skb->data; + cmd->pri_min = __cpu_to_le32(arg->pri_min); + cmd->pri_max = __cpu_to_le32(arg->pri_max); + cmd->width_min = __cpu_to_le32(arg->width_min); + cmd->width_max = __cpu_to_le32(arg->width_max); + cmd->sidx_min = __cpu_to_le32(arg->sidx_min); + cmd->sidx_max = __cpu_to_le32(arg->sidx_max); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n", + arg->pri_min, arg->pri_max, arg->width_min, + arg->width_max, arg->sidx_min, arg->sidx_max); + return skb; +} + static struct sk_buff * ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value) { @@ -8798,6 +8959,7 @@ static const struct wmi_ops wmi_10_4_ops = { .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev, .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme, .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, @@ -8844,6 +9006,7 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update, .gen_pdev_get_tpc_table_cmdid = ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid, + .gen_radar_found = ath10k_wmi_10_4_gen_radar_found, /* shared with 10.2 */ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, @@ -8906,8 +9069,11 @@ int ath10k_wmi_attach(struct ath10k *ar) init_completion(&ar->wmi.service_ready); init_completion(&ar->wmi.unified_ready); init_completion(&ar->wmi.barrier); + init_completion(&ar->wmi.radar_confirm); INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work); + INIT_WORK(&ar->radar_confirmation_work, + ath10k_radar_confirmation_work); return 0; } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 700b12f46baf..b48db54e9865 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -969,6 +969,7 @@ struct wmi_cmd_map { u32 vdev_sifs_trigger_time_cmdid; u32 pdev_wds_entry_list_cmdid; u32 tdls_set_offchan_mode_cmdid; + u32 radar_found_cmdid; }; /* @@ -1803,6 +1804,11 @@ enum wmi_10_4_cmd_id { WMI_10_4_TDLS_SET_STATE_CMDID, WMI_10_4_TDLS_PEER_UPDATE_CMDID, WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, + WMI_10_4_PDEV_SEND_FD_CMDID, + WMI_10_4_ENABLE_FILS_CMDID, + WMI_10_4_PDEV_SET_BRIDGE_MACADDR_CMDID, + WMI_10_4_ATF_GROUP_WMM_AC_CONFIG_REQUEST_CMDID, + WMI_10_4_RADAR_FOUND_CMDID, 
WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1, }; @@ -1878,6 +1884,9 @@ enum wmi_10_4_event_id { WMI_10_4_PDEV_TPC_TABLE_EVENTID, WMI_10_4_PDEV_WDS_ENTRY_LIST_EVENTID, WMI_10_4_TDLS_PEER_EVENTID, + WMI_10_4_HOST_SWFDA_EVENTID, + WMI_10_4_ESP_ESTIMATE_EVENTID, + WMI_10_4_DFS_STATUS_CHECK_EVENTID, WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1, }; @@ -3398,6 +3407,25 @@ struct wmi_10_4_phyerr_event { u8 buf[0]; } __packed; +struct wmi_radar_found_info { + __le32 pri_min; + __le32 pri_max; + __le32 width_min; + __le32 width_max; + __le32 sidx_min; + __le32 sidx_max; +} __packed; + +enum wmi_radar_confirmation_status { + /* Detected radar was due to SW pulses */ + WMI_SW_RADAR_DETECTED = 0, + + WMI_RADAR_DETECTION_FAIL = 1, + + /* Real radar detected */ + WMI_HW_RADAR_DETECTED = 2, +}; + #define PHYERR_TLV_SIG 0xBB #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8 @@ -6631,6 +6659,10 @@ struct wmi_phyerr_hdr_arg { const void *phyerrs; }; +struct wmi_dfs_status_ev_arg { + u32 status; +}; + struct wmi_svc_rdy_ev_arg { __le32 min_tx_power; __le32 max_tx_power; -- cgit v1.2.3 From 87f825e6e246cee0cdeaafd5bba4ef8c6df2748d Mon Sep 17 00:00:00 2001 From: Eyal Ilsar Date: Tue, 22 May 2018 22:02:56 +0300 Subject: wcn36xx: Add support for Factory Test Mode (FTM) Introduce infrastructure for supporting Factory Test Mode (FTM) of the wireless LAN subsystem. In order for the user space to access the firmware in test mode the relevant netlink channel needs to be exposed from the kernel driver. The above is achieved as follows: 1) Register wcn36xx driver to testmode callback from netlink 2) Add testmode callback implementation to handle incoming FTM commands 3) Add FTM command packet structure 4) Add handling for GET_BUILD_RELEASE_NUMBER (msgid=0x32A2) 5) Add generic handling for all PTT_MSG packets Signed-off-by: Eyal Ilsar Signed-off-by: Ramon Fried Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/Makefile | 2 + drivers/net/wireless/ath/wcn36xx/hal.h | 16 +++ drivers/net/wireless/ath/wcn36xx/main.c | 3 + drivers/net/wireless/ath/wcn36xx/smd.c | 81 ++++++++++++++ drivers/net/wireless/ath/wcn36xx/smd.h | 4 + drivers/net/wireless/ath/wcn36xx/testmode.c | 149 ++++++++++++++++++++++++++ drivers/net/wireless/ath/wcn36xx/testmode.h | 46 ++++++++ drivers/net/wireless/ath/wcn36xx/testmode_i.h | 29 +++++ drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 2 + 9 files changed, 332 insertions(+) create mode 100644 drivers/net/wireless/ath/wcn36xx/testmode.c create mode 100644 drivers/net/wireless/ath/wcn36xx/testmode.h create mode 100644 drivers/net/wireless/ath/wcn36xx/testmode_i.h (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile index 3b09435104eb..582049f65735 100644 --- a/drivers/net/wireless/ath/wcn36xx/Makefile +++ b/drivers/net/wireless/ath/wcn36xx/Makefile @@ -6,3 +6,5 @@ wcn36xx-y += main.o \ smd.o \ pmc.o \ debug.o + +wcn36xx-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 2aed6c233508..8abda2760e04 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -2236,6 +2236,22 @@ struct wcn36xx_hal_switch_channel_rsp_msg { } __packed; +struct wcn36xx_hal_process_ptt_msg_req_msg { + struct wcn36xx_hal_msg_header header; + + /* Actual FTM Command body */ + u8 ptt_msg[0]; +} __packed; + +struct wcn36xx_hal_process_ptt_msg_rsp_msg { + struct 
wcn36xx_hal_msg_header header; + + /* FTM Command response status */ + u32 ptt_msg_resp_status; + /* Actual FTM Command body */ + u8 ptt_msg[0]; +} __packed; + struct update_edca_params_req_msg { struct wcn36xx_hal_msg_header header; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 662e50540b07..aeb5e6e806be 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -26,6 +26,7 @@ #include #include #include "wcn36xx.h" +#include "testmode.h" unsigned int wcn36xx_dbg_mask; module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644); @@ -1146,6 +1147,8 @@ static const struct ieee80211_ops wcn36xx_ops = { .sta_add = wcn36xx_sta_add, .sta_remove = wcn36xx_sta_remove, .ampdu_action = wcn36xx_ampdu_action, + + CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd) }; static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index b855a58d5aac..b4dadf75d565 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -298,12 +298,26 @@ static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr, msg_body.header.len = sizeof(msg_body); \ } while (0) \ +#define INIT_HAL_PTT_MSG(p_msg_body, ppt_msg_len) \ + do { \ + memset(p_msg_body, 0, sizeof(*p_msg_body) + ppt_msg_len); \ + p_msg_body->header.msg_type = WCN36XX_HAL_PROCESS_PTT_REQ; \ + p_msg_body->header.msg_version = WCN36XX_HAL_MSG_VERSION0; \ + p_msg_body->header.len = sizeof(*p_msg_body) + ppt_msg_len; \ + } while (0) + #define PREPARE_HAL_BUF(send_buf, msg_body) \ do { \ memset(send_buf, 0, msg_body.header.len); \ memcpy(send_buf, &msg_body, sizeof(msg_body)); \ } while (0) \ +#define PREPARE_HAL_PTT_MSG_BUF(send_buf, p_msg_body) \ + do { \ + memset(send_buf, 0, p_msg_body->header.len); \ + memcpy(send_buf, p_msg_body, p_msg_body->header.len); \ + } while (0) + static int wcn36xx_smd_rsp_status_check(void *buf, size_t len) { struct wcn36xx_fw_msg_status_rsp *rsp; @@ -760,6 +774,71 @@ out: return ret; } +static int wcn36xx_smd_process_ptt_msg_rsp(void *buf, size_t len, + void **p_ptt_rsp_msg) +{ + struct wcn36xx_hal_process_ptt_msg_rsp_msg *rsp; + int ret; + + ret = wcn36xx_smd_rsp_status_check(buf, len); + if (ret) + return ret; + + rsp = (struct wcn36xx_hal_process_ptt_msg_rsp_msg *)buf; + + wcn36xx_dbg(WCN36XX_DBG_HAL, "process ptt msg responded with length %d\n", + rsp->header.len); + wcn36xx_dbg_dump(WCN36XX_DBG_HAL_DUMP, "HAL_PTT_MSG_RSP:", rsp->ptt_msg, + rsp->header.len - sizeof(rsp->ptt_msg_resp_status)); + + if (rsp->header.len > 0) { + *p_ptt_rsp_msg = kmalloc(rsp->header.len, GFP_ATOMIC); + if (!*p_ptt_rsp_msg) + return -ENOMEM; + memcpy(*p_ptt_rsp_msg, rsp->ptt_msg, rsp->header.len); + } + return ret; +} + +int wcn36xx_smd_process_ptt_msg(struct wcn36xx *wcn, + struct ieee80211_vif *vif, void *ptt_msg, size_t len, + void **ptt_rsp_msg) +{ + struct wcn36xx_hal_process_ptt_msg_req_msg *p_msg_body; + int ret; + + mutex_lock(&wcn->hal_mutex); + p_msg_body = kmalloc( + sizeof(struct wcn36xx_hal_process_ptt_msg_req_msg) + len, + GFP_ATOMIC); + if (!p_msg_body) { + ret = -ENOMEM; + goto out_nomem; + } + INIT_HAL_PTT_MSG(p_msg_body, len); + + memcpy(&p_msg_body->ptt_msg, ptt_msg, len); + + PREPARE_HAL_PTT_MSG_BUF(wcn->hal_buf, p_msg_body); + + ret = wcn36xx_smd_send_and_wait(wcn, p_msg_body->header.len); + if (ret) { + wcn36xx_err("Sending hal_process_ptt_msg failed\n"); + goto out; + } + ret = 
wcn36xx_smd_process_ptt_msg_rsp(wcn->hal_buf, wcn->hal_rsp_len, + ptt_rsp_msg); + if (ret) { + wcn36xx_err("process_ptt_msg response failed err=%d\n", ret); + goto out; + } +out: + kfree(p_msg_body); +out_nomem: + mutex_unlock(&wcn->hal_mutex); + return ret; +} + static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len) { struct wcn36xx_hal_update_scan_params_resp *rsp; @@ -2396,6 +2475,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, case WCN36XX_HAL_JOIN_RSP: case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP: case WCN36XX_HAL_CH_SWITCH_RSP: + case WCN36XX_HAL_PROCESS_PTT_RSP: case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP: case WCN36XX_HAL_8023_MULTICAST_LIST_RSP: case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP: @@ -2436,6 +2516,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, return 0; } + static void wcn36xx_ind_smd_work(struct work_struct *work) { struct wcn36xx *wcn = diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 61bb8d43138c..ff15df8ab56f 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -86,6 +86,10 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, u16 p2p_off); int wcn36xx_smd_switch_channel(struct wcn36xx *wcn, struct ieee80211_vif *vif, int ch); +int wcn36xx_smd_process_ptt_msg(struct wcn36xx *wcn, + struct ieee80211_vif *vif, + void *ptt_msg, size_t len, + void **ptt_rsp_msg); int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct sk_buff *skb); diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c new file mode 100644 index 000000000000..1279064a3b71 --- /dev/null +++ b/drivers/net/wireless/ath/wcn36xx/testmode.c @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include "wcn36xx.h" + +#include "testmode.h" +#include "testmode_i.h" +#include "hal.h" +#include "smd.h" + +static const struct nla_policy wcn36xx_tm_policy[WCN36XX_TM_ATTR_MAX + 1] = { + [WCN36XX_TM_ATTR_CMD] = { .type = NLA_U16 }, + [WCN36XX_TM_ATTR_DATA] = { .type = NLA_BINARY, + .len = WCN36XX_TM_DATA_MAX_LEN }, +}; + +struct build_release_number { + u16 drv_major; + u16 drv_minor; + u16 drv_patch; + u16 drv_build; + u16 ptt_max; + u16 ptt_min; + u16 fw_ver; +} __packed; + +static int wcn36xx_tm_cmd_ptt(struct wcn36xx *wcn, struct ieee80211_vif *vif, + struct nlattr *tb[]) +{ + int ret = 0, buf_len; + void *buf; + struct ftm_rsp_msg *msg, *rsp = NULL; + struct sk_buff *skb; + + if (!tb[WCN36XX_TM_ATTR_DATA]) + return -EINVAL; + + buf = nla_data(tb[WCN36XX_TM_ATTR_DATA]); + buf_len = nla_len(tb[WCN36XX_TM_ATTR_DATA]); + msg = (struct ftm_rsp_msg *)buf; + + wcn36xx_dbg(WCN36XX_DBG_TESTMODE, + "testmode cmd wmi msg_id 0x%04X msg_len %d buf %pK buf_len %d\n", + msg->msg_id, msg->msg_body_length, + buf, buf_len); + + wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "REQ ", buf, buf_len); + + if (msg->msg_id == MSG_GET_BUILD_RELEASE_NUMBER) { + struct build_release_number *body = + (struct build_release_number *) + msg->msg_response; + + body->drv_major = wcn->fw_major; + body->drv_minor = wcn->fw_minor; + body->drv_patch = wcn->fw_version; + body->drv_build = wcn->fw_revision; + body->ptt_max = 10; + body->ptt_min = 0; + + rsp = msg; + rsp->resp_status = 0; + } else { + wcn36xx_dbg(WCN36XX_DBG_TESTMODE, + "PPT Request >> HAL size %d\n", + msg->msg_body_length); + + msg->resp_status = wcn36xx_smd_process_ptt_msg(wcn, vif, msg, + msg->msg_body_length, (void *)(&rsp)); + + wcn36xx_dbg(WCN36XX_DBG_TESTMODE, + "Response status = %d\n", + msg->resp_status); + if (rsp) + wcn36xx_dbg(WCN36XX_DBG_TESTMODE, + "PPT Response << HAL size %d\n", + rsp->msg_body_length); + } + + if (!rsp) { + rsp = msg; + wcn36xx_warn("No response! Echoing request with response status %d\n", + rsp->resp_status); + } + wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "RSP ", + rsp, rsp->msg_body_length); + + skb = cfg80211_testmode_alloc_reply_skb(wcn->hw->wiphy, + nla_total_size(msg->msg_body_length)); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + ret = nla_put(skb, WCN36XX_TM_ATTR_DATA, rsp->msg_body_length, rsp); + if (ret) { + kfree_skb(skb); + goto out; + } + + ret = cfg80211_testmode_reply(skb); + +out: + if (rsp != msg) + kfree(rsp); + + return ret; +} + +int wcn36xx_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) +{ + struct wcn36xx *wcn = hw->priv; + struct nlattr *tb[WCN36XX_TM_ATTR_MAX + 1]; + int ret = 0; + unsigned short attr; + + wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "Data:", data, len); + ret = nla_parse(tb, WCN36XX_TM_ATTR_MAX, data, len, + wcn36xx_tm_policy, NULL); + if (ret) + return ret; + + if (!tb[WCN36XX_TM_ATTR_CMD]) + return -EINVAL; + + attr = nla_get_u16(tb[WCN36XX_TM_ATTR_CMD]); + + if (attr != WCN36XX_TM_CMD_PTT) + return -EOPNOTSUPP; + + return wcn36xx_tm_cmd_ptt(wcn, vif, tb); +} diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.h b/drivers/net/wireless/ath/wcn36xx/testmode.h new file mode 100644 index 000000000000..4c6cfdb46580 --- /dev/null +++ b/drivers/net/wireless/ath/wcn36xx/testmode.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wcn36xx.h" + +struct ftm_rsp_msg { + u16 msg_id; + u16 msg_body_length; + u32 resp_status; + u8 msg_response[0]; +} __packed; + +/* The request buffer of FTM which contains a byte of command and the request */ +struct ftm_payload { + u16 ftm_cmd_type; + struct ftm_rsp_msg ftm_cmd_msg; +} __packed; + +#define MSG_GET_BUILD_RELEASE_NUMBER 0x32A2 + +#ifdef CONFIG_NL80211_TESTMODE +int wcn36xx_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); + +#else +static inline int wcn36xx_tm_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) +{ + return 0; +} + +#endif diff --git a/drivers/net/wireless/ath/wcn36xx/testmode_i.h b/drivers/net/wireless/ath/wcn36xx/testmode_i.h new file mode 100644 index 000000000000..8a1477ffd5a0 --- /dev/null +++ b/drivers/net/wireless/ath/wcn36xx/testmode_i.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#define WCN36XX_TM_DATA_MAX_LEN 5000 + +enum wcn36xx_tm_attr { + __WCN36XX_TM_ATTR_INVALID = 0, + WCN36XX_TM_ATTR_CMD = 1, + WCN36XX_TM_ATTR_DATA = 2, + + /* keep last */ + __WCN36XX_TM_ATTR_AFTER_LAST, + WCN36XX_TM_ATTR_MAX = __WCN36XX_TM_ATTR_AFTER_LAST - 1, +}; + +#define WCN36XX_TM_CMD_PTT 3 diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 9343989d1169..11e74015c79a 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -50,6 +50,8 @@ enum wcn36xx_debug_mask { WCN36XX_DBG_BEACON_DUMP = 0x00001000, WCN36XX_DBG_PMC = 0x00002000, WCN36XX_DBG_PMC_DUMP = 0x00004000, + WCN36XX_DBG_TESTMODE = 0x00008000, + WCN36XX_DBG_TESTMODE_DUMP = 0x00010000, WCN36XX_DBG_ANY = 0xffffffff, }; -- cgit v1.2.3 From 0c235113b3c42197dba66baf76697359b03a5046 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Thu, 24 May 2018 10:14:00 +0100 Subject: sfc: stop the TX queue before pushing new buffers efx_enqueue_skb() can push new buffers for the xmit_more functionality. 
We must stop the TX queue before this push or else the TX queue does not get restarted and we get a netdev watchdog. In the error handling we may now need to unwind more than one packet, and we may need to push the new buffers onto the partner queue. v2: In the error leg also push this queue if xmit_more is set Fixes: e9117e5099ea ("sfc: Firmware-Assisted TSO version 2") Reported-by: Jarod Wilson Tested-by: Jarod Wilson Signed-off-by: Martin Habets Acked-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/tx.c | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index cece961f2e82..c3ad564ac4c0 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -435,17 +435,18 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, } while (1); } -/* Remove buffers put into a tx_queue. None of the buffers must have - * an skb attached. +/* Remove buffers put into a tx_queue for the current packet. + * None of the buffers must have an skb attached. */ -static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) +static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count) { struct efx_tx_buffer *buffer; unsigned int bytes_compl = 0; unsigned int pkts_compl = 0; /* Work backwards until we hit the original insert pointer value */ - while (tx_queue->insert_count != tx_queue->write_count) { + while (tx_queue->insert_count != insert_count) { --tx_queue->insert_count; buffer = __efx_tx_queue_get_insert_buffer(tx_queue); efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); @@ -504,6 +505,8 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, */ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { + unsigned int old_insert_count = tx_queue->insert_count; + bool xmit_more = skb->xmit_more; bool data_mapped = false; unsigned int segments; unsigned int skb_len; @@ -553,8 +556,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Update BQL */ netdev_tx_sent_queue(tx_queue->core_txq, skb_len); + efx_tx_maybe_stop_queue(tx_queue); + /* Pass off to hardware */ - if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { + if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); /* There could be packets left on the partner queue if those @@ -577,14 +582,26 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) tx_queue->tx_packets++; } - efx_tx_maybe_stop_queue(tx_queue); - return NETDEV_TX_OK; err: - efx_enqueue_unwind(tx_queue); + efx_enqueue_unwind(tx_queue, old_insert_count); dev_kfree_skb_any(skb); + + /* If we're not expecting another transmit and we had something to push + * on this queue or a partner queue then we need to push here to get the + * previous packets out.
+ */ + if (!xmit_more) { + struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); + + if (txq2->xmit_more_available) + efx_nic_push_buffers(txq2); + + efx_nic_push_buffers(tx_queue); + } + return NETDEV_TX_OK; } -- cgit v1.2.3 From b526e56b31832385c97b224fcb9f45536a568527 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 24 May 2018 19:27:07 +0800 Subject: net: fec: remove stale comment This comment is outdated as fec_ptp_ioctl has been replaced by fec_ptp_set/fec_ptp_get since commit 1d5244d0e43b ("fec: Implement the SIOCGHWTSTAMP ioctl") Signed-off-by: YueHaibing Acked-by: Fugang Duan Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_ptp.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index f81439796ac7..d438ef8a371d 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -466,12 +466,6 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -/** - * fec_ptp_hwtstamp_ioctl - control hardware time stamping - * @ndev: pointer to net_device - * @ifreq: ioctl data - * @cmd: particular ioctl requested - */ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) { struct fec_enet_private *fep = netdev_priv(ndev); -- cgit v1.2.3 From 57ccaedb74158be6d7d6edb255a6c153ec19e618 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 24 May 2018 17:49:30 +0530 Subject: cxgb4/cxgb4vf: link management changes for new SFP newer SFPs like SFP28 and QSFP28 Transceiver Modules present several new possibilities which we haven't faced before. Fix the assumptions in the code reflecting the more limited capabilities of previous Transceiver Module systems Original work by Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 22 +++++----- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 34 ++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 47 +++++++++++++++++++++- 3 files changed, 85 insertions(+), 18 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 59d04d73c672..f7eef93ffc87 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -800,24 +800,20 @@ static int set_link_ksettings(struct net_device *dev, if (base->duplex != DUPLEX_FULL) return -EINVAL; - if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { - /* PHY offers a single speed. See if that's what's - * being requested. - */ - if (base->autoneg == AUTONEG_DISABLE && - (lc->pcaps & speed_to_fw_caps(base->speed))) - return 0; - return -EINVAL; - } - old_lc = *lc; - if (base->autoneg == AUTONEG_DISABLE) { + if (!(lc->pcaps & FW_PORT_CAP32_ANEG) || + base->autoneg == AUTONEG_DISABLE) { fw_caps = speed_to_fw_caps(base->speed); - if (!(lc->pcaps & fw_caps)) + /* Must only specify a single speed which must be supported + * as part of the Physical Port Capabilities. 
+ */ + if ((fw_caps & (fw_caps - 1)) != 0 || + !(lc->pcaps & fw_caps)) return -EINVAL; + lc->speed_caps = fw_caps; - lc->acaps = 0; + lc->acaps = fw_caps; } else { fw_caps = lmm_to_fw_caps(link_ksettings->link_modes.advertising); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 704f6960a6ea..962384cdd64b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4066,6 +4066,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; struct fw_port_cmd cmd; unsigned int fw_mdi; + int ret; fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps); /* Convert driver coding of Pause Frame Flow Control settings into the @@ -4100,6 +4101,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; } + if (rcap & ~lc->pcaps) { + dev_err(adapter->pdev_dev, + "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n", + rcap, lc->pcaps); + return -EINVAL; + } + /* And send that on to the Firmware ... */ memset(&cmd, 0, sizeof(cmd)); @@ -4110,13 +4118,21 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 ? FW_PORT_ACTION_L1_CFG : FW_PORT_ACTION_L1_CFG32) | - FW_LEN16(cmd)); + FW_LEN16(cmd)); if (fw_caps == FW_CAPS16) cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); else cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap); - return t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL, - sleep_ok, timeout); + + ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL, + sleep_ok, timeout); + if (ret) { + dev_err(adapter->pdev_dev, + "Requested Port Capabilities %#x rejected, error %d\n", + rcap, -ret); + return ret; + } + return ret; } /** @@ -8395,7 +8411,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc->lpacaps = lpacaps; lc->acaps = acaps & ADVERT_MASK; - if (lc->acaps & FW_PORT_CAP32_ANEG) { + if (!(lc->acaps & FW_PORT_CAP32_ANEG)) { + lc->autoneg = AUTONEG_DISABLE; + } else if (lc->acaps & FW_PORT_CAP32_ANEG) { lc->autoneg = AUTONEG_ENABLE; } else { /* When Autoneg is disabled, user needs to set @@ -8600,6 +8618,13 @@ static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, lc->requested_fec = FEC_AUTO; lc->fec = fwcap_to_cc_fec(lc->def_acaps); + /* If the Port is capable of Auto-Negtotiation, initialize it as + * "enabled" and copy over all of the Physical Port Capabilities + * to the Advertised Port Capabilities. Otherwise mark it as + * Auto-Negotiate disabled and select the highest supported speed + * for the link. Note parallel structure in t4_link_l1cfg_core() + * and t4_handle_get_port_info(). 
+ */ if (lc->pcaps & FW_PORT_CAP32_ANEG) { lc->acaps = lc->pcaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; @@ -8607,6 +8632,7 @@ static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, } else { lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; + lc->speed_caps = fwcap_to_fwspeed(acaps); } } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 3017f7873ff9..8786431a8825 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -405,6 +405,36 @@ static unsigned int fwcap_to_speed(fw_port_cap32_t caps) return 0; } +/** + * fwcap_to_fwspeed - return highest speed in Port Capabilities + * @acaps: advertised Port Capabilities + * + * Get the highest speed for the port from the advertised Port + * Capabilities. It will be either the highest speed from the list of + * speeds or whatever user has set using ethtool. + */ +static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps) +{ + #define TEST_SPEED_RETURN(__caps_speed) \ + do { \ + if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return FW_PORT_CAP32_SPEED_##__caps_speed; \ + } while (0) + + TEST_SPEED_RETURN(400G); + TEST_SPEED_RETURN(200G); + TEST_SPEED_RETURN(100G); + TEST_SPEED_RETURN(50G); + TEST_SPEED_RETURN(40G); + TEST_SPEED_RETURN(25G); + TEST_SPEED_RETURN(10G); + TEST_SPEED_RETURN(1G); + TEST_SPEED_RETURN(100M); + + #undef TEST_SPEED_RETURN + return 0; +} + /* * init_link_config - initialize a link's SW state * @lc: structure holding the link state @@ -431,6 +461,13 @@ static void init_link_config(struct link_config *lc, lc->requested_fec = FEC_AUTO; lc->fec = lc->auto_fec; + /* If the Port is capable of Auto-Negtotiation, initialize it as + * "enabled" and copy over all of the Physical Port Capabilities + * to the Advertised Port Capabilities. Otherwise mark it as + * Auto-Negotiate disabled and select the highest supported speed + * for the link. Note parallel structure in t4_link_l1cfg_core() + * and t4_handle_get_port_info(). + */ if (lc->pcaps & FW_PORT_CAP32_ANEG) { lc->acaps = acaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; @@ -438,6 +475,7 @@ static void init_link_config(struct link_config *lc, } else { lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; + lc->speed_caps = fwcap_to_fwspeed(acaps); } } @@ -1955,7 +1993,14 @@ static void t4vf_handle_get_port_info(struct port_info *pi, lc->lpacaps = lpacaps; lc->acaps = acaps & ADVERT_MASK; - if (lc->acaps & FW_PORT_CAP32_ANEG) { + /* If we're not physically capable of Auto-Negotiation, note + * this as Auto-Negotiation disabled. Otherwise, we track + * what Auto-Negotiation settings we have. Note parallel + * structure in init_link_config(). + */ + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { + lc->autoneg = AUTONEG_DISABLE; + } else if (lc->acaps & FW_PORT_CAP32_ANEG) { lc->autoneg = AUTONEG_ENABLE; } else { /* When Autoneg is disabled, user needs to set -- cgit v1.2.3 From e8d452923ae6cdcf2fd7bf4fac4a3751eda56931 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 24 May 2018 18:32:15 +0530 Subject: cxgb4: clean up init_one clean up init_one and use chip_ver consistently throughout init_one() for chip version. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 47 +++++++++++++---------- drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h | 2 + 2 files changed, 28 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8405187f8e3f..903c0af37456 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5240,14 +5240,11 @@ static void free_some_resources(struct adapter *adapter) NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) #define SEGMENT_SIZE 128 -static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) +static int t4_get_chip_type(struct adapter *adap, int ver) { - u16 device_id; - - /* Retrieve adapter's device ID */ - pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); + u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A)); - switch (device_id >> 12) { + switch (ver) { case CHELSIO_T4: return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); case CHELSIO_T5: @@ -5255,8 +5252,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) case CHELSIO_T6: return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); default: - dev_err(&pdev->dev, "Device %d is not supported\n", - device_id); + break; } return -EINVAL; } @@ -5426,15 +5422,18 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - int func, i, err, s_qpp, qpp, num_seg; + struct net_device *netdev; + struct adapter *adapter; + static int adap_idx = 1; + int s_qpp, qpp, num_seg; struct port_info *pi; bool highdma = false; - struct adapter *adapter = NULL; - struct net_device *netdev; - void __iomem *regs; - u32 whoami, pl_rev; enum chip_type chip; - static int adap_idx = 1; + void __iomem *regs; + int func, chip_ver; + u16 device_id; + int i, err; + u32 whoami; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -5470,11 +5469,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_adapter; /* We control everything through one PF */ - whoami = readl(regs + PL_WHOAMI_A); - pl_rev = REV_G(readl(regs + PL_REV_A)); - chip = get_chip_type(pdev, pl_rev); - func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ? - SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); + whoami = t4_read_reg(adapter, PL_WHOAMI_A); + pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); + chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id)); + if (chip < 0) { + dev_err(&pdev->dev, "Device %d is not supported\n", device_id); + err = chip; + goto out_free_adapter; + } + chip_ver = CHELSIO_CHIP_VERSION(chip); + func = chip_ver <= CHELSIO_T5 ? 
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; @@ -5640,7 +5645,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC; - if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5) { + if (chip_ver > CHELSIO_T5) { netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | @@ -5720,7 +5725,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n"); #if IS_ENABLED(CONFIG_IPV6) - if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) && + if (chip_ver <= CHELSIO_T5 && (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) { /* CLIP functionality is not present in hardware, * hence disable all offload features diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h b/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h index 54b718111e3f..721c77577ec5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h @@ -34,6 +34,8 @@ #ifndef __T4_CHIP_TYPE_H__ #define __T4_CHIP_TYPE_H__ +#define CHELSIO_PCI_ID_VER(__DeviceID) ((__DeviceID) >> 12) + #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 #define CHELSIO_T6 0x6 -- cgit v1.2.3 From e2f4f4e927f1356d561f612f1c55d64654f60353 Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Thu, 24 May 2018 19:33:37 +0530 Subject: cxgb4/cxgb4vf: Notify link changes to OS-dependent code We have a confusion of two different abstractions in the Common Code: Physical Link (Port) and Logical Network Interface (Virtual Interface), and we haven't been properly managing the state of the intersection of those two abstractions. On the one hand we have the Physical state of the Link -- up or down -- and on the other we have the logical state of the VI, enabled or not. {ethN} refers to both the Physical and Logical State. In this case, ifconfig only affects/interrogates the Logical State of a VI, and ethtool only deals with the Physical State. And these are different. So, just because we disable the VI, we don't really want to change the Physical Link Up/Down state. Thus, the previous hack to set "lc->link_ok = 0" when we disable a VI is completely incorrect. Where we get into trouble is where the Physical Link State and the Logical VI State cross swords. And that happens in t4_handle_get_port_info() where we need to manage/safe the Physical Link State, but we also need to know when the Logical VI State has changed and pass that back up to the OS-dependent Driver routine t4_os_link_changed() which is concerned about the Logical Interface. So we enable a VI and that causes Firmware to send us a new Port Information message, but if none of the Physical Link State particulars have changed, we don't call t4_os_link_changed(). This fix uses the existing OS Contract APIs for the Common Code to inform the OS-dependent portion of the Host Driver when the "Link" (really Logical Network Interface) is "up" or "down". A new API t4_enable_pi_params() is added which calls t4_enable_vi_params() and, if that is successful, then calls back to the OS Contract API t4_os_link_changed() notifying the OS-dependent layer of the potential Link State change. Original Work by : Casey Leedom Signed-off-by: Santosh Rastapur Signed-off-by: Arjun Vynipadath Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3 +++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5 ++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 28 ++++++++++++++++++++++ .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 5 ++-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 5 +++- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 24 +++++++++++++++++++ 6 files changed, 64 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0f305d97f4ee..0dbe2d9e22d6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1738,6 +1738,9 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, bool ucast, u64 vec, bool sleep_ok); int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); +int t4_enable_pi_params(struct adapter *adap, unsigned int mbox, + struct port_info *pi, + bool rx_en, bool tx_en, bool dcb_en); int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en); int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 903c0af37456..0efae2030e71 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -465,7 +465,7 @@ static int link_start(struct net_device *dev) &pi->link_cfg); if (ret == 0) { local_bh_disable(); - ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true, + ret = t4_enable_pi_params(pi->adapter, mb, pi, true, true, CXGB4_DCB_ENABLED); local_bh_enable(); } @@ -2344,7 +2344,8 @@ static int cxgb_close(struct net_device *dev) netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - ret = t4_enable_vi(adapter, adapter->pf, pi->viid, false, false); + ret = t4_enable_pi_params(adapter, adapter->pf, pi, + false, false, false); #ifdef CONFIG_CHELSIO_T4_DCB cxgb4_dcb_reset(dev); dcb_tx_queue_prio_enable(dev, false); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 962384cdd64b..39da7e3c804b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -8013,6 +8013,34 @@ int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); } +/** + * t4_enable_pi_params - enable/disable a Port's Virtual Interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pi: the Port Information structure + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * @dcb_en: 1=enable delivery of Data Center Bridging messages. + * + * Enables/disables a Port's Virtual Interface. Note that setting DCB + * Enable only makes sense when enabling a Virtual Interface ... + * If the Virtual Interface enable/disable operation is successful, + * we notify the OS-specific code of a potential Link Status change + * via the OS Contract API t4_os_link_changed(). 
+ */ +int t4_enable_pi_params(struct adapter *adap, unsigned int mbox, + struct port_info *pi, + bool rx_en, bool tx_en, bool dcb_en) +{ + int ret = t4_enable_vi_params(adap, mbox, pi->viid, + rx_en, tx_en, dcb_en); + if (ret) + return ret; + t4_os_link_changed(adap, pi->port_id, + rx_en && tx_en && pi->link_cfg.link_ok); + return 0; +} + /** * t4_identify_port - identify a VI's port by blinking its LED * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 71f13bd2b5e4..ff84791a0ff8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -274,7 +274,7 @@ static int link_start(struct net_device *dev) * is enabled on a port. */ if (ret == 0) - ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true); + ret = t4vf_enable_pi(pi->adapter, pi, true, true); /* The Virtual Interfaces are connected to an internal switch on the * chip which allows VIs attached to the same port to talk to each @@ -822,8 +822,7 @@ static int cxgb4vf_stop(struct net_device *dev) netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - t4vf_enable_vi(adapter, pi->viid, false, false); - pi->link_cfg.link_ok = 0; + t4vf_enable_pi(adapter, pi, false, false); clear_bit(pi->port_id, &adapter->open_device_map); if (adapter->open_device_map == 0) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 712e8f0c71b4..ccca67cf4487 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -391,7 +391,10 @@ int t4vf_config_rss_range(struct adapter *, unsigned int, int, int, int t4vf_alloc_vi(struct adapter *, int); int t4vf_free_vi(struct adapter *, int); -int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool); +int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, bool rx_en, + bool tx_en); +int t4vf_enable_pi(struct adapter *adapter, struct port_info *pi, bool rx_en, + bool tx_en); int t4vf_identify_port(struct adapter *, unsigned int, unsigned int); int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 8786431a8825..5b8c08cf523f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1399,6 +1399,30 @@ int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } +/** + * t4vf_enable_pi - enable/disable a Port's virtual interface + * @adapter: the adapter + * @pi: the Port Information structure + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a Port's virtual interface. If the Virtual + * Interface enable/disable operation is successful, we notify the + * OS-specific code of a potential Link Status change via the OS Contract + * API t4vf_os_link_changed(). 
+ */ +int t4vf_enable_pi(struct adapter *adapter, struct port_info *pi, + bool rx_en, bool tx_en) +{ + int ret = t4vf_enable_vi(adapter, pi->viid, rx_en, tx_en); + + if (ret) + return ret; + t4vf_os_link_changed(adapter, pi->pidx, + rx_en && tx_en && pi->link_cfg.link_ok); + return 0; +} + /** * t4vf_identify_port - identify a VI's port by blinking its LED * @adapter: the adapter -- cgit v1.2.3 From 87885310c199be78a144dff4fec8a94f081920b8 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 24 May 2018 09:54:49 -0700 Subject: qede: Refactor ethtool rx classification flow. This patch simplifies the ethtool rx flow configuration [via ethtool -U/-N] flow code base by dividing it logically into various APIs based on given protocols. It also separates various validations and calculations done along the flow in their own APIs. Signed-off-by: Manish Chopra Signed-off-by: Shahed Shaikh Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_filter.c | 512 ++++++++++++++++--------- 1 file changed, 330 insertions(+), 182 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 43569b1839be..bd5b4e4c1d47 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -38,6 +38,7 @@ #include #include "qede.h" +#define QEDE_FILTER_PRINT_MAX_LEN (64) struct qede_arfs_tuple { union { __be32 src_ipv4; @@ -51,6 +52,18 @@ struct qede_arfs_tuple { __be16 dst_port; __be16 eth_proto; u8 ip_proto; + + /* Describe filtering mode needed for this kind of filter */ + enum qed_filter_config_mode mode; + + /* Used to compare new/old filters. Return true if IPs match */ + bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b); + + /* Given an address into ethhdr build a header from tuple info */ + void (*build_hdr)(struct qede_arfs_tuple *t, void *header); + + /* Stringify the tuple for a print into the provided buffer */ + void (*stringify)(struct qede_arfs_tuple *t, void *buffer); }; struct qede_arfs_fltr_node { @@ -90,7 +103,9 @@ struct qede_arfs { spinlock_t arfs_list_lock; unsigned long *arfs_fltr_bmap; int filter_count; - bool enable; + + /* Currently configured filtering mode */ + enum qed_filter_config_mode mode; }; static void qede_configure_arfs_fltr(struct qede_dev *edev, @@ -110,11 +125,15 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev, params.qid = rxq_id; params.b_is_add = add_fltr; - DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, - "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", - add_fltr ? "Adding" : "Deleting", - n->flow_id, n->sw_id, ntohs(n->tuple.src_port), - ntohs(n->tuple.dst_port), rxq_id); + if (n->tuple.stringify) { + char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN]; + + n->tuple.stringify(&n->tuple, tuple_buffer); + DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, + "%s sw_id[0x%x]: %s [queue %d]\n", + add_fltr ? 
"Adding" : "Deleting", + n->sw_id, tuple_buffer, rxq_id); + } n->used = true; n->filter_op = add_fltr; @@ -145,14 +164,13 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev, INIT_HLIST_NODE(&fltr->node); hlist_add_head(&fltr->node, QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx)); - edev->arfs->filter_count++; - - if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { - enum qed_filter_config_mode mode; - mode = QED_FILTER_CONFIG_MODE_5_TUPLE; - edev->ops->configure_arfs_searcher(edev->cdev, mode); - edev->arfs->enable = true; + edev->arfs->filter_count++; + if (edev->arfs->filter_count == 1 && + edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) { + edev->ops->configure_arfs_searcher(edev->cdev, + fltr->tuple.mode); + edev->arfs->mode = fltr->tuple.mode; } return 0; @@ -167,14 +185,15 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev, fltr->buf_len, DMA_TO_DEVICE); qede_free_arfs_filter(edev, fltr); - edev->arfs->filter_count--; - if (!edev->arfs->filter_count && edev->arfs->enable) { + edev->arfs->filter_count--; + if (!edev->arfs->filter_count && + edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) { enum qed_filter_config_mode mode; mode = QED_FILTER_CONFIG_MODE_DISABLE; - edev->arfs->enable = false; edev->ops->configure_arfs_searcher(edev->cdev, mode); + edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE; } } @@ -264,25 +283,17 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) } } +#ifdef CONFIG_RFS_ACCEL spin_lock_bh(&edev->arfs->arfs_list_lock); - if (!edev->arfs->filter_count) { - if (edev->arfs->enable) { - enum qed_filter_config_mode mode; - - mode = QED_FILTER_CONFIG_MODE_DISABLE; - edev->arfs->enable = false; - edev->ops->configure_arfs_searcher(edev->cdev, mode); - } -#ifdef CONFIG_RFS_ACCEL - } else { + if (edev->arfs->filter_count) { set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, QEDE_SP_TASK_POLL_DELAY); -#endif } spin_unlock_bh(&edev->arfs->arfs_list_lock); +#endif } /* This function waits until all aRFS filters get deleted and freed. 
@@ -512,6 +523,7 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, eth->h_proto = skb->protocol; n->tuple.eth_proto = skb->protocol; n->tuple.ip_proto = ip_proto; + n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE; memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb)); rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx); @@ -1339,38 +1351,6 @@ qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location) return NULL; } -static bool -qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos, - struct ethtool_rx_flow_spec *fsp, - __be16 proto) -{ - if (proto == htons(ETH_P_IP)) { - struct ethtool_tcpip4_spec *ip; - - ip = &fsp->h_u.tcp_ip4_spec; - - if (tpos->tuple.src_ipv4 == ip->ip4src && - tpos->tuple.dst_ipv4 == ip->ip4dst) - return true; - else - return false; - } else { - struct ethtool_tcpip6_spec *ip6; - struct in6_addr *src; - - ip6 = &fsp->h_u.tcp_ip6_spec; - src = &tpos->tuple.src_ipv6; - - if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) && - !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst, - sizeof(struct in6_addr))) - return true; - else - return false; - } - return false; -} - int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, u32 *rule_locs) { @@ -1461,96 +1441,306 @@ unlock: } static int -qede_validate_and_check_flow_exist(struct qede_dev *edev, - struct ethtool_rx_flow_spec *fsp, - int *min_hlen) +qede_poll_arfs_filter_config(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr) { - __be16 src_port = 0x0, dst_port = 0x0; - struct qede_arfs_fltr_node *fltr; - struct hlist_node *temp; - struct hlist_head *head; - __be16 eth_proto; - u8 ip_proto; + int count = QEDE_ARFS_POLL_COUNT; - if (fsp->location >= QEDE_RFS_MAX_FLTR || - fsp->ring_cookie >= QEDE_RSS_COUNT(edev)) - return -EINVAL; + while (fltr->used && count) { + msleep(20); + count--; + } + + if (count == 0 || fltr->fw_rc) { + DP_NOTICE(edev, "Timeout in polling filter config\n"); + qede_dequeue_fltr_and_config_searcher(edev, fltr); + return -EIO; + } + + return fltr->fw_rc; +} + +static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t) +{ + int size = ETH_HLEN; + + if (t->eth_proto == htons(ETH_P_IP)) + size += sizeof(struct iphdr); + else + size += sizeof(struct ipv6hdr); + + if (t->ip_proto == IPPROTO_TCP) + size += sizeof(struct tcphdr); + else + size += sizeof(struct udphdr); + + return size; +} + +static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a, + struct qede_arfs_tuple *b) +{ + if (a->eth_proto != htons(ETH_P_IP) || + b->eth_proto != htons(ETH_P_IP)) + return false; + + return (a->src_ipv4 == b->src_ipv4) && + (a->dst_ipv4 == b->dst_ipv4); +} + +static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t, + void *header) +{ + __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr)); + struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN); + struct ethhdr *eth = (struct ethhdr *)header; + + eth->h_proto = t->eth_proto; + ip->saddr = t->src_ipv4; + ip->daddr = t->dst_ipv4; + ip->version = 0x4; + ip->ihl = 0x5; + ip->protocol = t->ip_proto; + ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN); + + /* ports is weakly typed to suit both TCP and UDP ports */ + ports[0] = t->src_port; + ports[1] = t->dst_port; +} + +static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t, + void *buffer) +{ + const char *prefix = t->ip_proto == IPPROTO_TCP ? 
"TCP" : "UDP"; + + snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN, + "%s %pI4 (%04x) -> %pI4 (%04x)", + prefix, &t->src_ipv4, t->src_port, + &t->dst_ipv4, t->dst_port); +} + +static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a, + struct qede_arfs_tuple *b) +{ + if (a->eth_proto != htons(ETH_P_IPV6) || + b->eth_proto != htons(ETH_P_IPV6)) + return false; + + if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr))) + return false; + + if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr))) + return false; + + return true; +} - if (fsp->flow_type == TCP_V4_FLOW) { - *min_hlen += sizeof(struct iphdr) + - sizeof(struct tcphdr); - eth_proto = htons(ETH_P_IP); - ip_proto = IPPROTO_TCP; - } else if (fsp->flow_type == UDP_V4_FLOW) { - *min_hlen += sizeof(struct iphdr) + - sizeof(struct udphdr); - eth_proto = htons(ETH_P_IP); - ip_proto = IPPROTO_UDP; - } else if (fsp->flow_type == TCP_V6_FLOW) { - *min_hlen += sizeof(struct ipv6hdr) + - sizeof(struct tcphdr); - eth_proto = htons(ETH_P_IPV6); - ip_proto = IPPROTO_TCP; - } else if (fsp->flow_type == UDP_V6_FLOW) { - *min_hlen += sizeof(struct ipv6hdr) + - sizeof(struct udphdr); - eth_proto = htons(ETH_P_IPV6); - ip_proto = IPPROTO_UDP; +static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t, + void *header) +{ + __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr)); + struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN); + struct ethhdr *eth = (struct ethhdr *)header; + + eth->h_proto = t->eth_proto; + memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr)); + memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr)); + ip6->version = 0x6; + + if (t->ip_proto == IPPROTO_TCP) { + ip6->nexthdr = NEXTHDR_TCP; + ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr)); } else { - DP_NOTICE(edev, "Unsupported flow type = 0x%x\n", - fsp->flow_type); - return -EPROTONOSUPPORT; + ip6->nexthdr = NEXTHDR_UDP; + ip6->payload_len = cpu_to_be16(sizeof(struct udphdr)); } - if (eth_proto == htons(ETH_P_IP)) { - src_port = fsp->h_u.tcp_ip4_spec.psrc; - dst_port = fsp->h_u.tcp_ip4_spec.pdst; + /* ports is weakly typed to suit both TCP and UDP ports */ + ports[0] = t->src_port; + ports[1] = t->dst_port; +} + +static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + t->eth_proto = htons(ETH_P_IP); + t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src; + t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst; + t->src_port = fs->h_u.tcp_ip4_spec.psrc; + t->dst_port = fs->h_u.tcp_ip4_spec.pdst; + + /* We must have a valid 4-tuple */ + if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; } else { - src_port = fsp->h_u.tcp_ip6_spec.psrc; - dst_port = fsp->h_u.tcp_ip6_spec.pdst; + DP_INFO(edev, "Invalid N-tuple\n"); + return -EOPNOTSUPP; } - head = QEDE_ARFS_BUCKET_HEAD(edev, 0); - hlist_for_each_entry_safe(fltr, temp, head, node) { - if ((fltr->tuple.ip_proto == ip_proto && - fltr->tuple.eth_proto == eth_proto && - qede_compare_user_flow_ips(fltr, fsp, eth_proto) && - fltr->tuple.src_port == src_port && - fltr->tuple.dst_port == dst_port) || - fltr->sw_id == fsp->location) - return -EEXIST; + t->ip_comp = qede_flow_spec_ipv4_cmp; + t->build_hdr = qede_flow_build_ipv4_hdr; + t->stringify = qede_flow_stringify_ipv4_hdr; + + return 0; +} + +static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + t->ip_proto = IPPROTO_TCP; + + if 
(qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + t->ip_proto = IPPROTO_UDP; + + if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + struct in6_addr zero_addr; + void *p; + + p = &zero_addr; + memset(p, 0, sizeof(zero_addr)); + + t->eth_proto = htons(ETH_P_IPV6); + memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + t->src_port = fs->h_u.tcp_ip6_spec.psrc; + t->dst_port = fs->h_u.tcp_ip6_spec.pdst; + + /* We must make sure we have a valid 4-tuple */ + if (t->src_port && t->dst_port && + memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) && + memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + } else { + DP_INFO(edev, "Invalid N-tuple\n"); + return -EOPNOTSUPP; } + t->ip_comp = qede_flow_spec_ipv6_cmp; + t->build_hdr = qede_flow_build_ipv6_hdr; + return 0; } -static int -qede_poll_arfs_filter_config(struct qede_dev *edev, - struct qede_arfs_fltr_node *fltr) +static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) { - int count = QEDE_ARFS_POLL_COUNT; + t->ip_proto = IPPROTO_TCP; - while (fltr->used && count) { - msleep(20); - count--; + if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + t->ip_proto = IPPROTO_UDP; + + if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_tuple(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + memset(t, 0, sizeof(*t)); + + switch ((fs->flow_type & ~FLOW_EXT)) { + case TCP_V4_FLOW: + return qede_flow_spec_to_tuple_tcpv4(edev, t, fs); + case UDP_V4_FLOW: + return qede_flow_spec_to_tuple_udpv4(edev, t, fs); + case TCP_V6_FLOW: + return qede_flow_spec_to_tuple_tcpv6(edev, t, fs); + case UDP_V6_FLOW: + return qede_flow_spec_to_tuple_udpv6(edev, t, fs); + default: + DP_VERBOSE(edev, NETIF_MSG_IFUP, + "Can't support flow of type %08x\n", fs->flow_type); + return -EOPNOTSUPP; } - if (count == 0 || fltr->fw_rc) { - qede_dequeue_fltr_and_config_searcher(edev, fltr); - return -EIO; + return 0; +} + +static int qede_flow_spec_validate(struct qede_dev *edev, + struct ethtool_rx_flow_spec *fs, + struct qede_arfs_tuple *t) +{ + if (fs->location >= QEDE_RFS_MAX_FLTR) { + DP_INFO(edev, "Location out-of-bounds\n"); + return -EINVAL; } - return fltr->fw_rc; + /* Check location isn't already in use */ + if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) { + DP_INFO(edev, "Location already in use\n"); + return -EINVAL; + } + + if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) { + DP_INFO(edev, "Queue out-of-bounds\n"); + return -EINVAL; + } + + return 0; +} + +/* Must be called while qede lock is held */ +static struct qede_arfs_fltr_node * +qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t) +{ + struct qede_arfs_fltr_node *fltr; + struct hlist_node *temp; + struct hlist_head *head; + + head = 
QEDE_ARFS_BUCKET_HEAD(edev, 0); + + hlist_for_each_entry_safe(fltr, temp, head, node) { + if (fltr->tuple.ip_proto == t->ip_proto && + fltr->tuple.src_port == t->src_port && + fltr->tuple.dst_port == t->dst_port && + t->ip_comp(&fltr->tuple, t)) + return fltr; + } + + return NULL; } int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) { struct ethtool_rx_flow_spec *fsp = &info->fs; struct qede_arfs_fltr_node *n; - int min_hlen = ETH_HLEN, rc; - struct ethhdr *eth; - struct iphdr *ip; - __be16 *ports; + struct qede_arfs_tuple t; + int min_hlen, rc; __qede_lock(edev); @@ -1559,16 +1749,28 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) goto unlock; } - rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen); + /* Translate the flow specification into something fittign our DB */ + rc = qede_flow_spec_to_tuple(edev, &t, fsp); + if (rc) + goto unlock; + + /* Make sure location is valid and filter isn't already set */ + rc = qede_flow_spec_validate(edev, fsp, &t); if (rc) goto unlock; + if (qede_flow_find_fltr(edev, &t)) { + rc = -EINVAL; + goto unlock; + } + n = kzalloc(sizeof(*n), GFP_KERNEL); if (!n) { rc = -ENOMEM; goto unlock; } + min_hlen = qede_flow_get_min_header_size(&t); n->data = kzalloc(min_hlen, GFP_KERNEL); if (!n->data) { kfree(n); @@ -1581,66 +1783,11 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) n->buf_len = min_hlen; n->rxq_id = fsp->ring_cookie; n->next_rxq_id = n->rxq_id; - eth = (struct ethhdr *)n->data; - if (info->fs.flow_type == TCP_V4_FLOW || - info->fs.flow_type == UDP_V4_FLOW) { - ports = (__be16 *)(n->data + ETH_HLEN + - sizeof(struct iphdr)); - eth->h_proto = htons(ETH_P_IP); - n->tuple.eth_proto = htons(ETH_P_IP); - n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src; - n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst; - n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc; - n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst; - ports[0] = n->tuple.src_port; - ports[1] = n->tuple.dst_port; - ip = (struct iphdr *)(n->data + ETH_HLEN); - ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src; - ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst; - ip->version = 0x4; - ip->ihl = 0x5; - - if (info->fs.flow_type == TCP_V4_FLOW) { - n->tuple.ip_proto = IPPROTO_TCP; - ip->protocol = IPPROTO_TCP; - } else { - n->tuple.ip_proto = IPPROTO_UDP; - ip->protocol = IPPROTO_UDP; - } - ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN); - } else { - struct ipv6hdr *ip6; - - ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN); - ports = (__be16 *)(n->data + ETH_HLEN + - sizeof(struct ipv6hdr)); - eth->h_proto = htons(ETH_P_IPV6); - n->tuple.eth_proto = htons(ETH_P_IPV6); - memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src, - sizeof(struct in6_addr)); - memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst, - sizeof(struct in6_addr)); - n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc; - n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst; - ports[0] = n->tuple.src_port; - ports[1] = n->tuple.dst_port; - memcpy(&ip6->saddr, &n->tuple.src_ipv6, - sizeof(struct in6_addr)); - memcpy(&ip6->daddr, &n->tuple.dst_ipv6, - sizeof(struct in6_addr)); - ip6->version = 0x6; + memcpy(&n->tuple, &t, sizeof(n->tuple)); - if (info->fs.flow_type == TCP_V6_FLOW) { - n->tuple.ip_proto = IPPROTO_TCP; - ip6->nexthdr = NEXTHDR_TCP; - ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr)); - } else { - n->tuple.ip_proto = IPPROTO_UDP; - ip6->nexthdr = NEXTHDR_UDP; - ip6->payload_len = cpu_to_be16(sizeof(struct udphdr)); - } - } + 
/* Build a minimal header according to the flow */ + n->tuple.build_hdr(&n->tuple, n->data); rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); if (rc) @@ -1650,6 +1797,7 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) rc = qede_poll_arfs_filter_config(edev, n); unlock: __qede_unlock(edev); + return rc; } -- cgit v1.2.3 From 89ffd14ee95dca812874fcd25ad3538ff3592a49 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 24 May 2018 09:54:50 -0700 Subject: qede: Validate unsupported configurations Validate and prevent some of the configurations for unsupported [by firmware] inputs [for example - mac ext, vlans, masks/prefix, tos/tclass] via ethtool -N/-U. Signed-off-by: Manish Chopra Signed-off-by: Shahed Shaikh Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_filter.c | 73 ++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index bd5b4e4c1d47..43ed420900c1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1560,10 +1560,63 @@ static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t, ports[1] = t->dst_port; } +/* Validate fields which are set and not accepted by the driver */ +static int qede_flow_spec_validate_unused(struct qede_dev *edev, + struct ethtool_rx_flow_spec *fs) +{ + if (fs->flow_type & FLOW_MAC_EXT) { + DP_INFO(edev, "Don't support MAC extensions\n"); + return -EOPNOTSUPP; + } + + if ((fs->flow_type & FLOW_EXT) && + (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) { + DP_INFO(edev, "Don't support vlan-based classification\n"); + return -EOPNOTSUPP; + } + + if ((fs->flow_type & FLOW_EXT) && + (fs->h_ext.data[0] || fs->h_ext.data[1])) { + DP_INFO(edev, "Don't support user defined data\n"); + return -EOPNOTSUPP; + } + + return 0; +} + static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, struct qede_arfs_tuple *t, struct ethtool_rx_flow_spec *fs) { + if ((fs->h_u.tcp_ip4_spec.ip4src & + fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) { + DP_INFO(edev, "Don't support IP-masks\n"); + return -EOPNOTSUPP; + } + + if ((fs->h_u.tcp_ip4_spec.ip4dst & + fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) { + DP_INFO(edev, "Don't support IP-masks\n"); + return -EOPNOTSUPP; + } + + if ((fs->h_u.tcp_ip4_spec.psrc & + fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) { + DP_INFO(edev, "Don't support port-masks\n"); + return -EOPNOTSUPP; + } + + if ((fs->h_u.tcp_ip4_spec.pdst & + fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) { + DP_INFO(edev, "Don't support port-masks\n"); + return -EOPNOTSUPP; + } + + if (fs->h_u.tcp_ip4_spec.tos) { + DP_INFO(edev, "Don't support tos\n"); + return -EOPNOTSUPP; + } + t->eth_proto = htons(ETH_P_IP); t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src; t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst; @@ -1619,6 +1672,23 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, p = &zero_addr; memset(p, 0, sizeof(zero_addr)); + if ((fs->h_u.tcp_ip6_spec.psrc & + fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) { + DP_INFO(edev, "Don't support port-masks\n"); + return -EOPNOTSUPP; + } + + if ((fs->h_u.tcp_ip6_spec.pdst & + fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) { + DP_INFO(edev, "Don't support port-masks\n"); + return -EOPNOTSUPP; + } + + if (fs->h_u.tcp_ip6_spec.tclass) { + DP_INFO(edev, 
"Don't support tclass\n"); + return -EOPNOTSUPP; + } + t->eth_proto = htons(ETH_P_IPV6); memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src, sizeof(struct in6_addr)); @@ -1673,6 +1743,9 @@ static int qede_flow_spec_to_tuple(struct qede_dev *edev, { memset(t, 0, sizeof(*t)); + if (qede_flow_spec_validate_unused(edev, fs)) + return -EOPNOTSUPP; + switch ((fs->flow_type & ~FLOW_EXT)) { case TCP_V4_FLOW: return qede_flow_spec_to_tuple_tcpv4(edev, t, fs); -- cgit v1.2.3 From 3893fc62b1769db3ef160f7f1e36d3db754497ee Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 24 May 2018 09:54:51 -0700 Subject: qed*: Support other classification modes. Currently, driver supports flow classification to PF receive queues based on TCP/UDP 4 tuples [src_ip, dst_ip, src_port, dst_port] only. This patch enables to configure different flow profiles [For example - only UDP dest port or src_ip based] on the adapter so that classification can be done according to just those fields as well. Although, at a time just one type of flow configuration is supported due to limited number of flow profiles available on the device. For example - ethtool -N enp7s0f0 flow-type udp4 dst-port 45762 action 2 ethtool -N enp7s0f0 flow-type tcp4 src-ip 192.16.4.10 action 1 ethtool -N enp7s0f0 flow-type udp6 dst-port 45762 action 3 Signed-off-by: Manish Chopra Signed-off-by: Shahed Shaikh Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 2 ++ drivers/net/ethernet/qlogic/qede/qede_filter.c | 31 ++++++++++++++++++++++++-- include/linux/qed/qed_eth_if.h | 1 + 3 files changed, 32 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 5e655c3601cf..3cb8a8088467 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1973,6 +1973,8 @@ qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode) return GFT_PROFILE_TYPE_4_TUPLE; if (mode == QED_FILTER_CONFIG_MODE_IP_DEST) return GFT_PROFILE_TYPE_IP_DST_ADDR; + if (mode == QED_FILTER_CONFIG_MODE_IP_SRC) + return GFT_PROFILE_TYPE_IP_SRC_ADDR; return GFT_PROFILE_TYPE_L4_DST_PORT; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 43ed420900c1..9b84f0cb12e7 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1623,9 +1623,17 @@ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, t->src_port = fs->h_u.tcp_ip4_spec.psrc; t->dst_port = fs->h_u.tcp_ip4_spec.pdst; - /* We must have a valid 4-tuple */ + /* We must either have a valid 4-tuple or only dst port + * or only src ip as an input + */ if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) { t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + } else if (!t->src_port && t->dst_port && + !t->src_ipv4 && !t->dst_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; + } else if (!t->src_port && !t->dst_port && + !t->dst_ipv4 && t->src_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; } else { DP_INFO(edev, "Invalid N-tuple\n"); return -EOPNOTSUPP; @@ -1697,11 +1705,21 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, t->src_port = fs->h_u.tcp_ip6_spec.psrc; t->dst_port = fs->h_u.tcp_ip6_spec.pdst; - /* We must make sure we have a valid 4-tuple */ + /* We must make sure we have a valid 4-tuple or only dest port + * or only src ip as an input + */ if (t->src_port && t->dst_port && 
memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) && memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) { t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + } else if (!t->src_port && t->dst_port && + !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) && + !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; + } else if (!t->src_port && !t->dst_port && + !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) && + memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; } else { DP_INFO(edev, "Invalid N-tuple\n"); return -EOPNOTSUPP; @@ -1779,6 +1797,15 @@ static int qede_flow_spec_validate(struct qede_dev *edev, return -EINVAL; } + /* Check if the filtering-mode could support the filter */ + if (edev->arfs->filter_count && + edev->arfs->mode != t->mode) { + DP_INFO(edev, + "flow_spec would require filtering mode %08x, but %08x is configured\n", + t->mode, edev->arfs->filter_count); + return -EINVAL; + } + if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) { DP_INFO(edev, "Queue out-of-bounds\n"); return -EINVAL; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 7f9756fe9180..557e86e1c7d9 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -66,6 +66,7 @@ enum qed_filter_config_mode { QED_FILTER_CONFIG_MODE_5_TUPLE, QED_FILTER_CONFIG_MODE_L4_PORT, QED_FILTER_CONFIG_MODE_IP_DEST, + QED_FILTER_CONFIG_MODE_IP_SRC, }; struct qed_ntuple_filter_params { -- cgit v1.2.3 From 39385ab02c3e6ffe8f70a445433c7419fd2df753 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 24 May 2018 09:54:52 -0700 Subject: qede: Support flow classification to the VFs. With the supported classification modes [4 tuples based, udp port based, src-ip based], flows can be classified to the VFs as well. With this patch, flows can be re-directed to the requested VF provided in "action" field of command. Please note that driver doesn't really care about the queue bits in "action" field for the VFs. Since queue will be still chosen by FW using RSS hash. [I.e., the classification would be done according to vport-only] For examples - ethtool -N p5p1 flow-type udp4 dst-port 8000 action 0x100000000 ethtool -N p5p1 flow-type tcp4 src-ip 192.16.6.10 action 0x200000000 ethtool -U p5p1 flow-type tcp4 src-ip 192.168.40.100 dst-ip \ 192.168.40.200 src-port 6660 dst-port 5550 \ action 0x100000000 Signed-off-by: Manish Chopra Signed-off-by: Shahed Shaikh Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_filter.c | 34 +++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 9b84f0cb12e7..6c02c21d996d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -86,6 +86,7 @@ struct qede_arfs_fltr_node { u16 sw_id; u16 rxq_id; u16 next_rxq_id; + u8 vfid; bool filter_op; bool used; u8 fw_rc; @@ -125,14 +126,19 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev, params.qid = rxq_id; params.b_is_add = add_fltr; + if (n->vfid) { + params.b_is_vf = true; + params.vf_id = n->vfid - 1; + } + if (n->tuple.stringify) { char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN]; n->tuple.stringify(&n->tuple, tuple_buffer); DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, - "%s sw_id[0x%x]: %s [queue %d]\n", + "%s sw_id[0x%x]: %s [vf %u queue %d]\n", add_fltr ? 
"Adding" : "Deleting", - n->sw_id, tuple_buffer, rxq_id); + n->sw_id, tuple_buffer, n->vfid, rxq_id); } n->used = true; @@ -1435,6 +1441,10 @@ int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd) fsp->ring_cookie = fltr->rxq_id; + if (fltr->vfid) { + fsp->ring_cookie |= ((u64)fltr->vfid) << + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + } unlock: __qede_unlock(edev); return rc; @@ -1806,6 +1816,9 @@ static int qede_flow_spec_validate(struct qede_dev *edev, return -EINVAL; } + if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie)) + return 0; + if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) { DP_INFO(edev, "Queue out-of-bounds\n"); return -EINVAL; @@ -1835,6 +1848,19 @@ qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t) return NULL; } +static void qede_flow_set_destination(struct qede_dev *edev, + struct qede_arfs_fltr_node *n, + struct ethtool_rx_flow_spec *fs) +{ + n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie); + n->next_rxq_id = n->rxq_id; + + if (n->vfid) + DP_VERBOSE(edev, QED_MSG_SP, + "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1); +} + int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) { struct ethtool_rx_flow_spec *fsp = &info->fs; @@ -1881,11 +1907,11 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) n->sw_id = fsp->location; set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); n->buf_len = min_hlen; - n->rxq_id = fsp->ring_cookie; - n->next_rxq_id = n->rxq_id; memcpy(&n->tuple, &t, sizeof(n->tuple)); + qede_flow_set_destination(edev, n, fsp); + /* Build a minimal header according to the flow */ n->tuple.build_hdr(&n->tuple, n->data); -- cgit v1.2.3 From 608e00d0a2eb53079c55dc9c14d8711bbb3a4390 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 24 May 2018 09:54:53 -0700 Subject: qed*: Support drop action classification With this patch, User can configure for the supported flows to be dropped. Added a stat "gft_filter_drop" as well to be populated in ethtool for the dropped flows. For example - ethtool -N p5p1 flow-type udp4 dst-port 8000 action -1 ethtool -N p5p1 flow-type tcp4 scr-ip 192.168.8.1 action -1 Signed-off-by: Manish Chopra Signed-off-by: Shahed Shaikh Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 35 ++++++++++++++----------- drivers/net/ethernet/qlogic/qede/qede.h | 1 + drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 1 + drivers/net/ethernet/qlogic/qede/qede_filter.c | 14 ++++++++++ drivers/net/ethernet/qlogic/qede/qede_main.c | 1 + include/linux/qed/qed_eth_if.h | 3 +++ include/linux/qed/qed_if.h | 1 + 7 files changed, 41 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 3cb8a8088467..1c0d0c217936 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1677,6 +1677,8 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(tstats.mftag_filter_discard); p_stats->common.mac_filter_discards += HILO_64_REGPAIR(tstats.eth_mac_filter_discard); + p_stats->common.gft_filter_drop += + HILO_64_REGPAIR(tstats.eth_gft_drop_pkt); } static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, @@ -2015,16 +2017,6 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, u8 abs_vport_id = 0; int rc = -EINVAL; - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc) - return rc; - - if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { - rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); - if (rc) - return rc; - } - /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); @@ -2049,15 +2041,28 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length); - if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { - p_ramrod->rx_qid_valid = 1; - p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); + if (p_params->b_is_drop) { + p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT); + } else { + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) + return rc; + + if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { + rc = qed_fw_l2_queue(p_hwfn, p_params->qid, + &abs_rx_q_id); + if (rc) + return rc; + + p_ramrod->rx_qid_valid = 1; + p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); + } + + p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); } p_ramrod->flow_id_valid = 0; p_ramrod->flow_id = 0; - - p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); p_ramrod->filter_action = p_params->b_is_add ? 
GFT_ADD_FILTER : GFT_DELETE_FILTER; diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 2d3f09ed413b..81c5c8dfa2ef 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -75,6 +75,7 @@ struct qede_stats_common { u64 rx_bcast_pkts; u64 mftag_filter_discards; u64 mac_filter_discards; + u64 gft_filter_drop; u64 tx_ucast_bytes; u64 tx_mcast_bytes; u64 tx_bcast_bytes; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 8c6fdad91986..6906e04b609e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -161,6 +161,7 @@ static const struct { QEDE_STAT(no_buff_discards), QEDE_PF_STAT(mftag_filter_discards), QEDE_PF_STAT(mac_filter_discards), + QEDE_PF_STAT(gft_filter_drop), QEDE_STAT(tx_err_drop_pkts), QEDE_STAT(ttl0_discard), QEDE_STAT(packet_too_big_discard), diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 6c02c21d996d..e9e088d9c815 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -90,6 +90,7 @@ struct qede_arfs_fltr_node { bool filter_op; bool used; u8 fw_rc; + bool b_is_drop; struct hlist_node node; }; @@ -125,6 +126,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev, params.length = n->buf_len; params.qid = rxq_id; params.b_is_add = add_fltr; + params.b_is_drop = n->b_is_drop; if (n->vfid) { params.b_is_vf = true; @@ -1445,6 +1447,9 @@ int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd) fsp->ring_cookie |= ((u64)fltr->vfid) << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; } + + if (fltr->b_is_drop) + fsp->ring_cookie = RX_CLS_FLOW_DISC; unlock: __qede_unlock(edev); return rc; @@ -1816,6 +1821,10 @@ static int qede_flow_spec_validate(struct qede_dev *edev, return -EINVAL; } + /* If drop requested then no need to validate other data */ + if (fs->ring_cookie == RX_CLS_FLOW_DISC) + return 0; + if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie)) return 0; @@ -1852,6 +1861,11 @@ static void qede_flow_set_destination(struct qede_dev *edev, struct qede_arfs_fltr_node *n, struct ethtool_rx_flow_spec *fs) { + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + n->b_is_drop = true; + return; + } + n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie); n->next_rxq_id = n->rxq_id; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 9e70f713c3c5..d118771e1a7b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -347,6 +347,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts; p_common->mftag_filter_discards = stats.common.mftag_filter_discards; p_common->mac_filter_discards = stats.common.mac_filter_discards; + p_common->gft_filter_drop = stats.common.gft_filter_drop; p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes; p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 557e86e1c7d9..2978fa4add42 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -89,6 +89,9 @@ struct qed_ntuple_filter_params { /* true iff this filter is to be added. 
Else to be removed */ bool b_is_add; + + /* If flow needs to be dropped */ + bool b_is_drop; }; struct qed_dev_eth_info { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 44af652e7407..ac991a3b8f03 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1129,6 +1129,7 @@ struct qed_eth_stats_common { u64 rx_bcast_pkts; u64 mftag_filter_discards; u64 mac_filter_discards; + u64 gft_filter_drop; u64 tx_ucast_bytes; u64 tx_mcast_bytes; u64 tx_bcast_bytes; -- cgit v1.2.3 From a45675796a0763f5e0ffb75496c3865e961560d3 Mon Sep 17 00:00:00 2001 From: Bo Chen Date: Thu, 24 May 2018 12:48:35 -0700 Subject: 8139too: Remove unnecessary netif_napi_del() The call to free_netdev() in __rtl8139_cleanup_dev() clears the network device napi list, and explicit calls to netif_napi_del() are unnecessary. Signed-off-by: Bo Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/8139too.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index d118da5a10a2..ffd68a7bc9e1 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -1104,7 +1104,6 @@ static int rtl8139_init_one(struct pci_dev *pdev, return 0; err_out: - netif_napi_del(&tp->napi); __rtl8139_cleanup_dev (dev); pci_disable_device (pdev); return i; @@ -1119,7 +1118,6 @@ static void rtl8139_remove_one(struct pci_dev *pdev) assert (dev != NULL); cancel_delayed_work_sync(&tp->thread); - netif_napi_del(&tp->napi); unregister_netdev (dev); -- cgit v1.2.3 From e52cde71709348c0d67bf0f213b438fa4d6cf9a9 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 24 May 2018 20:52:14 -0700 Subject: net: dsa: dsa_loop: Make dynamic debugging helpful Remove redundant debug prints from phy_read/write since we can trace those calls through trace events. Enhance dynamic debug prints to print arguments which helps figuring how what is going on at the driver level with higher level configuration interfaces. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/dsa_loop.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 58f14af04639..816f34d64736 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -67,7 +67,7 @@ static struct phy_device *phydevs[PHY_MAX_ADDR]; static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds, int port) { - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d\n", __func__, port); return DSA_TAG_PROTO_NONE; } @@ -124,8 +124,6 @@ static int dsa_loop_phy_read(struct dsa_switch *ds, int port, int regnum) struct mii_bus *bus = ps->bus; int ret; - dev_dbg(ds->dev, "%s\n", __func__); - ret = mdiobus_read_nested(bus, ps->port_base + port, regnum); if (ret < 0) ps->ports[port].mib[DSA_LOOP_PHY_READ_ERR].val++; @@ -142,8 +140,6 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port, struct mii_bus *bus = ps->bus; int ret; - dev_dbg(ds->dev, "%s\n", __func__); - ret = mdiobus_write_nested(bus, ps->port_base + port, regnum, value); if (ret < 0) ps->ports[port].mib[DSA_LOOP_PHY_WRITE_ERR].val++; @@ -156,7 +152,8 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port, static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge) { - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n", + __func__, port, bridge->name); return 0; } @@ -164,19 +161,22 @@ static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port, static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *bridge) { - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n", + __func__, port, bridge->name); } static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, state: %d\n", + __func__, port, state); } static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n", + __func__, port, vlan_filtering); return 0; } @@ -188,7 +188,8 @@ dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port, struct dsa_loop_priv *ps = ds->priv; struct mii_bus *bus = ps->bus; - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, vlan: %d-%d", + __func__, port, vlan->vid_begin, vlan->vid_end); /* Just do a sleeping operation to make lockdep checks effective */ mdiobus_read(bus, ps->port_base + port, MII_BMSR); @@ -209,8 +210,6 @@ static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port, struct dsa_loop_vlan *vl; u16 vid; - dev_dbg(ds->dev, "%s\n", __func__); - /* Just do a sleeping operation to make lockdep checks effective */ mdiobus_read(bus, ps->port_base + port, MII_BMSR); @@ -222,6 +221,9 @@ static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port, vl->untagged |= BIT(port); else vl->untagged &= ~BIT(port); + + dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n", + __func__, port, vid, untagged ? 
"un" : "", pvid); } if (pvid) @@ -237,8 +239,6 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, struct dsa_loop_vlan *vl; u16 vid, pvid = ps->pvid; - dev_dbg(ds->dev, "%s\n", __func__); - /* Just do a sleeping operation to make lockdep checks effective */ mdiobus_read(bus, ps->port_base + port, MII_BMSR); @@ -251,6 +251,9 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, if (pvid == vid) pvid = 1; + + dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n", + __func__, port, vid, untagged ? "un" : "", pvid); } ps->pvid = pvid; -- cgit v1.2.3 From 52fff3274b08c2eaea33d3df546fcd91040dee3f Mon Sep 17 00:00:00 2001 From: Chris Mi Date: Wed, 16 May 2018 17:20:17 +0900 Subject: net/mlx5: E-Switch, Reorganize and rename fdb flow tables We have several fdb flow tables for each of the legacy and switchdev modes. In the switchdev mode, there are fast path and slow path flow tables. Towards adding more flow tables in upcoming patches, reorganize and rename the various existing ones to reflect their functionality. Signed-off-by: Chris Mi Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 22 +++++++++++----------- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 5 +++-- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 22 +++++++++++----------- 3 files changed, 25 insertions(+), 24 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 09f0e11c6ffc..6cab1dd66d1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -200,7 +200,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, spec->match_criteria_enable = match_header; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = - mlx5_add_flow_rules(esw->fdb_table.fdb, spec, + mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { esw_warn(esw->dev, @@ -282,7 +282,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create FDB Table err %d\n", err); goto out; } - esw->fdb_table.fdb = fdb; + esw->fdb_table.legacy.fdb = fdb; /* Addresses group : Full match unicast/multicast addresses */ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, @@ -343,9 +343,9 @@ out: mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); esw->fdb_table.legacy.addr_grp = NULL; } - if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) { - mlx5_destroy_flow_table(esw->fdb_table.fdb); - esw->fdb_table.fdb = NULL; + if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) { + mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); + esw->fdb_table.legacy.fdb = NULL; } } @@ -355,15 +355,15 @@ out: static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) { - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.legacy.fdb) return; esw_debug(esw->dev, "Destroy FDB Table\n"); mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); - mlx5_destroy_flow_table(esw->fdb_table.fdb); - esw->fdb_table.fdb = NULL; + mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); + esw->fdb_table.legacy.fdb = NULL; esw->fdb_table.legacy.addr_grp = NULL; esw->fdb_table.legacy.allmulti_grp = NULL; esw->fdb_table.legacy.promisc_grp = NULL; @@ -396,7 +396,7 @@ static int 
esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) fdb_add: /* SRIOV is enabled: Forward UC MAC to vport */ - if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY) + if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY) vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", @@ -486,7 +486,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.legacy.fdb) return 0; esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); @@ -526,7 +526,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.legacy.fdb) return 0; esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index f47a14e31b7d..d1a3f7fcca1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -117,16 +117,17 @@ struct mlx5_vport { }; struct mlx5_eswitch_fdb { - void *fdb; union { struct legacy_fdb { + struct mlx5_flow_table *fdb; struct mlx5_flow_group *addr_grp; struct mlx5_flow_group *allmulti_grp; struct mlx5_flow_group *promisc_grp; } legacy; struct offloads_fdb { - struct mlx5_flow_table *fdb; + struct mlx5_flow_table *fast_fdb; + struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *miss_grp; struct mlx5_flow_handle *miss_rule_uni; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index b9ea464bcfa9..bb8eac5523a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -119,7 +119,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) flow_act.encap_id = attr->encap_id; - rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb, + rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i); if (IS_ERR(rule)) goto err_add_rule; @@ -363,7 +363,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn dest.vport.num = vport; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); @@ -407,7 +407,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dest.vport.num = 0; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); @@ -422,7 +422,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16); dmac_v[0] = 0x01; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, 
&dest, 1); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); @@ -476,7 +476,7 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); goto out; } - esw->fdb_table.fdb = fdb; + esw->fdb_table.offloads.fast_fdb = fdb; out: return err; @@ -484,7 +484,7 @@ out: static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw) { - mlx5_destroy_flow_table(esw->fdb_table.fdb); + mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); } #define MAX_PF_SQ 256 @@ -530,7 +530,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); goto slow_fdb_err; } - esw->fdb_table.offloads.fdb = fdb; + esw->fdb_table.offloads.slow_fdb = fdb; /* create send-to-vport group */ memset(flow_group_in, 0, inlen); @@ -586,9 +586,9 @@ miss_rule_err: miss_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); send_vport_err: - mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); + mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); slow_fdb_err: - mlx5_destroy_flow_table(esw->fdb_table.fdb); + mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); fast_fdb_err: ns_err: kvfree(flow_group_in); @@ -597,7 +597,7 @@ ns_err: static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) { - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.offloads.fast_fdb) return; esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); @@ -606,7 +606,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); - mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); + mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); esw_destroy_offloads_fast_fdb_table(esw); } -- cgit v1.2.3 From a842dd04cf85fbc6e21e65a344b957f4a1dc0413 Mon Sep 17 00:00:00 2001 From: Chris Mi Date: Wed, 16 May 2018 17:42:52 +0900 Subject: net/mlx5: E-switch, Create a second level FDB flow table If firmware supports the forward action with a destination list that includes a flow table, create a second level FDB flow table. This is going to be used for flow based mirroring under the switchdev offloads mode. 
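In rough terms, the gate this patch introduces reads like the following sketch (identifiers are the ones added in the diff below; error paths simplified):

	/* Sketch only: when firmware can forward to another FDB table
	 * (fdb_multi_path_to_table), split the fast-path space in two and
	 * create a second, same-priority table to forward into.
	 */
	if (mlx5_esw_has_fwd_fdb(dev)) {
		esw_size >>= 1;	/* half for fast_fdb, half for fwd_fdb */
		fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
							  esw_size,
							  ESW_OFFLOADS_NUM_GROUPS,
							  1, flags);
		if (IS_ERR(fdb))
			return PTR_ERR(fdb);
		esw->fdb_table.offloads.fwd_fdb = fdb;
	}
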
Signed-off-by: Chris Mi Reviewed-by: Paul Blakey Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4 +++ .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 31 +++++++++++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- 3 files changed, 32 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index d1a3f7fcca1c..d06c11629121 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -55,6 +55,9 @@ #define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) +#define mlx5_esw_has_fwd_fdb(dev) \ + MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) + struct vport_ingress { struct mlx5_flow_table *acl; struct mlx5_flow_group *allow_untagged_spoofchk_grp; @@ -127,6 +130,7 @@ struct mlx5_eswitch_fdb { struct offloads_fdb { struct mlx5_flow_table *fast_fdb; + struct mlx5_flow_table *fwd_fdb; struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *miss_grp; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index bb8eac5523a7..8ea11f24380c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -454,7 +454,7 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) if (!root_ns) { esw_warn(dev, "Failed to get FDB flow namespace\n"); err = -EOPNOTSUPP; - goto out; + goto out_namespace; } esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", @@ -464,6 +464,9 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS, 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + if (mlx5_esw_has_fwd_fdb(dev)) + esw_size >>= 1; + if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) flags |= MLX5_FLOW_TABLE_TUNNEL_EN; @@ -474,16 +477,36 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) if (IS_ERR(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); - goto out; + goto out_namespace; } esw->fdb_table.offloads.fast_fdb = fdb; -out: + if (!mlx5_esw_has_fwd_fdb(dev)) + goto out_namespace; + + fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, + esw_size, + ESW_OFFLOADS_NUM_GROUPS, 1, + flags); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create fwd table err %d\n", err); + goto out_ft; + } + esw->fdb_table.offloads.fwd_fdb = fdb; + + return err; + +out_ft: + mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); +out_namespace: return err; } static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw) { + if (mlx5_esw_has_fwd_fdb(esw->dev)) + mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb); mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); } @@ -588,7 +611,7 @@ miss_err: send_vport_err: mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); slow_fdb_err: - mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); + esw_destroy_offloads_fast_fdb_table(esw); fast_fdb_err: ns_err: kvfree(flow_group_in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 
b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 806e95523f9e..f9c2c03083eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2495,7 +2495,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) if (!steering->fdb_root_ns) return -ENOMEM; - prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1); + prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2); if (IS_ERR(prio)) goto out_err; -- cgit v1.2.3 From 592d3651596924b0b1e9030634c48d262451eeaf Mon Sep 17 00:00:00 2001 From: Chris Mi Date: Fri, 4 May 2018 14:09:00 +0900 Subject: net/mlx5e: Parse mirroring action for offloaded TC eswitch flows Currently, we only support the mirred redirect TC sub-action. In order to support flow based vport mirroring, add support to parse the mirred mirror sub-action. For mirroring, user-space will typically set the action order such that the mirror port (mirror VF) sees packets as the original port (VF under mirroring) sent them or as it will receive them. In the general case, it means that packets are potentially sent to the mirror port before or after some actions were applied on them. To properly do that, we should follow on the exact action order as set for the flow and make sure this will also be the case when we program the HW offload. We introduce a counter for the output ports (attr->out_count), which we increase when parsing each mirred redirect/mirror sub-action and when dealing with encap. We introduce a counter (attr->mirror_count) telling us if split is needed. If no split is needed and mirroring is just multicasting to vport, the mirror count is zero, all the actions of the TC flow should apply on that single HW flow. If split is needed, the mirror count tells where to do the split, all non-mirred tc actions should apply only after the split. The mirror count is set while parsing the following actions encap/decap, header re-write, vlan push/pop. 
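As a usage illustration (the representor netdev names below are placeholders, not taken from this series), a mirror plus redirect flow could be requested with -

tc qdisc add dev rep0 ingress
tc filter add dev rep0 protocol ip parent ffff: flower skip_sw \
	action mirred egress mirror dev rep1 \
	action mirred egress redirect dev rep2

Here the mirror sub-action contributes one more output port, and the mirror count records where the flow would have to be split so that the remaining non-mirred actions apply only after the split.
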
Signed-off-by: Chris Mi Reviewed-by: Paul Blakey Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 26 +++++++++++++++----- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 10 ++++++-- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 28 ++++++++++++---------- 3 files changed, 43 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index a9c96fe8e4fe..302c5500f9ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -844,8 +844,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, } out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; - attr->out_rep = rpriv->rep; - attr->out_mdev = out_priv->mdev; + attr->out_rep[attr->out_count] = rpriv->rep; + attr->out_mdev[attr->out_count++] = out_priv->mdev; } err = mlx5_eswitch_add_vlan_action(esw, attr); @@ -2537,6 +2537,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return err; action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + attr->mirror_count = attr->out_count; continue; } @@ -2548,12 +2549,18 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EOPNOTSUPP; } - if (is_tcf_mirred_egress_redirect(a)) { - struct net_device *out_dev; + if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) { struct mlx5e_priv *out_priv; + struct net_device *out_dev; out_dev = tcf_mirred_dev(a); + if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + pr_err("can't support more than %d output ports, can't offload forwarding\n", + attr->out_count); + return -EOPNOTSUPP; + } + if (switchdev_port_same_parent_id(priv->netdev, out_dev) || is_merged_eswitch_dev(priv, out_dev)) { @@ -2561,8 +2568,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, MLX5_FLOW_CONTEXT_ACTION_COUNT; out_priv = netdev_priv(out_dev); rpriv = out_priv->ppriv; - attr->out_rep = rpriv->rep; - attr->out_mdev = out_priv->mdev; + attr->out_rep[attr->out_count] = rpriv->rep; + attr->out_mdev[attr->out_count++] = out_priv->mdev; } else if (encap) { parse_attr->mirred_ifindex = out_dev->ifindex; parse_attr->tun_info = *info; @@ -2585,6 +2592,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, encap = true; else return -EOPNOTSUPP; + attr->mirror_count = attr->out_count; continue; } @@ -2606,6 +2614,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } else { /* action is TCA_VLAN_ACT_MODIFY */ return -EOPNOTSUPP; } + attr->mirror_count = attr->out_count; continue; } @@ -2621,6 +2630,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (!actions_match_supported(priv, exts, parse_attr, flow)) return -EOPNOTSUPP; + if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); + return -EOPNOTSUPP; + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index d06c11629121..386b0e354c41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -239,12 +239,18 @@ enum mlx5_flow_match_level { MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP, }; +/* current maximum for flow based vport multicasting */ +#define 
MLX5_MAX_FLOW_FWD_VPORTS 2 + struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; - struct mlx5_eswitch_rep *out_rep; - struct mlx5_core_dev *out_mdev; + struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS]; + struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5_core_dev *in_mdev; + int mirror_count; + int out_count; + int action; __be16 vlan_proto; u16 vlan_vid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 8ea11f24380c..7db8b9a41925 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -48,12 +48,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr) { - struct mlx5_flow_destination dest[2] = {}; + struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; struct mlx5_flow_act flow_act = {0}; struct mlx5_fc *counter = NULL; struct mlx5_flow_handle *rule; + int j, i = 0; void *misc; - int i = 0; if (esw->mode != SRIOV_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); @@ -70,14 +70,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[i].vport.num = attr->out_rep->vport; - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { - dest[i].vport.vhca_id = - MLX5_CAP_GEN(attr->out_mdev, vhca_id); - dest[i].vport.vhca_id_valid = 1; + for (j = attr->mirror_count; j < attr->out_count; j++) { + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest[i].vport.num = attr->out_rep[j]->vport; + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { + dest[i].vport.vhca_id = + MLX5_CAP_GEN(attr->out_mdev[j], vhca_id); + dest[i].vport.vhca_id_valid = 1; + } + i++; } - i++; } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(esw->dev, true); @@ -173,7 +175,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop) struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL; in_rep = attr->in_rep; - out_rep = attr->out_rep; + out_rep = attr->out_rep[0]; if (push) vport = in_rep; @@ -194,7 +196,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, goto out_notsupp; in_rep = attr->in_rep; - out_rep = attr->out_rep; + out_rep = attr->out_rep[0]; if (push && in_rep->vport == FDB_UPLINK_VPORT) goto out_notsupp; @@ -245,7 +247,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->out_rep->vport == FDB_UPLINK_VPORT) { + if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) { vport->vlan_refcount++; attr->vlan_handled = true; } @@ -305,7 +307,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->out_rep->vport == FDB_UPLINK_VPORT) + if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) vport->vlan_refcount--; return 0; -- cgit v1.2.3 From e4ad91f23f10a9e7e7775f854584f4d54fe7f299 Mon Sep 17 00:00:00 2001 From: Chris Mi Date: Wed, 16 May 2018 17:54:38 +0900 Subject: net/mlx5e: Split offloaded eswitch TC rules for port mirroring If a TC rule needs to be split for mirroring, create two HW rules, in the first level and the second level flow tables accordingly. 
In the first level flow table, forward the packet to the mirror port and forward the packet to the second level flow table for further processing, eg. encap, vlan push or header re-write. Currently the matching is repeated in both stages. While here, simplify the setup of the vhca id valid indicator also in the existing code. Signed-off-by: Chris Mi Reviewed-by: Paul Blakey Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 57 +++++++++++++----- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4 ++ .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 68 +++++++++++++++++++--- 3 files changed, 108 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 302c5500f9ad..9372d914abe5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -75,12 +75,14 @@ enum { MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4), }; +#define MLX5E_TC_MAX_SPLITS 1 + struct mlx5e_tc_flow { struct rhash_head node; struct mlx5e_priv *priv; u64 cookie; u8 flags; - struct mlx5_flow_handle *rule; + struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; struct list_head encap; /* flows sharing the same encap ID */ struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ struct list_head hairpin; /* flows sharing the same hairpin */ @@ -794,8 +796,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, struct mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_fc *counter = NULL; - counter = mlx5_flow_rule_counter(flow->rule); - mlx5_del_flow_rules(flow->rule); + counter = mlx5_flow_rule_counter(flow->rule[0]); + mlx5_del_flow_rules(flow->rule[0]); mlx5_fc_destroy(priv->mdev, counter); if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) { @@ -870,9 +872,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); if (IS_ERR(rule)) goto err_add_rule; + + if (attr->mirror_count) { + flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr); + if (IS_ERR(flow->rule[1])) + goto err_fwd_rule; + } } return rule; +err_fwd_rule: + mlx5_eswitch_del_offloaded_rule(esw, rule, attr); + rule = flow->rule[1]; err_add_rule: if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); @@ -893,7 +904,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; - mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr); + if (attr->mirror_count) + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr); + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); } mlx5_eswitch_del_vlan_action(esw, attr); @@ -929,13 +942,25 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, list_for_each_entry(flow, &e->flows, encap) { esw_attr = flow->esw_attr; esw_attr->encap_id = e->encap_id; - flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr); - if (IS_ERR(flow->rule)) { - err = PTR_ERR(flow->rule); + flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr); + if (IS_ERR(flow->rule[0])) { + err = PTR_ERR(flow->rule[0]); mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", err); continue; } + + if (esw_attr->mirror_count) { + flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, 
esw_attr); + if (IS_ERR(flow->rule[1])) { + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr); + err = PTR_ERR(flow->rule[1]); + mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n", + err); + continue; + } + } + flow->flags |= MLX5E_TC_FLOW_OFFLOADED; } } @@ -948,8 +973,12 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, list_for_each_entry(flow, &e->flows, encap) { if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; - mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr); + if (attr->mirror_count) + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr); + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); } } @@ -984,7 +1013,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) continue; list_for_each_entry(flow, &e->flows, encap) { if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { - counter = mlx5_flow_rule_counter(flow->rule); + counter = mlx5_flow_rule_counter(flow->rule[0]); mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { neigh_used = true; @@ -2714,16 +2743,16 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); if (err < 0) goto err_free; - flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); + flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); } else { err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); if (err < 0) goto err_free; - flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow); + flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow); } - if (IS_ERR(flow->rule)) { - err = PTR_ERR(flow->rule); + if (IS_ERR(flow->rule[0])) { + err = PTR_ERR(flow->rule[0]); if (err != -EAGAIN) goto err_free; } @@ -2796,7 +2825,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED)) return 0; - counter = mlx5_flow_rule_counter(flow->rule); + counter = mlx5_flow_rule_counter(flow->rule[0]); if (!counter) return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 386b0e354c41..b174da2884c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -219,6 +219,10 @@ struct mlx5_flow_handle * mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr); +struct mlx5_flow_handle * +mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr); void mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 7db8b9a41925..cecd201f0b73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -50,6 +50,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, { struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_table *ft = NULL; struct mlx5_fc *counter = NULL; struct mlx5_flow_handle *rule; int j, i = 0; @@ -58,6 +59,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (esw->mode != SRIOV_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); + if 
(attr->mirror_count) + ft = esw->fdb_table.offloads.fwd_fdb; + else + ft = esw->fdb_table.offloads.fast_fdb; + flow_act.action = attr->action; /* if per flow vlan pop/push is emulated, don't set that into the firmware */ if (!mlx5_eswitch_vlan_actions_supported(esw->dev)) @@ -73,11 +79,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, for (j = attr->mirror_count; j < attr->out_count; j++) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest[i].vport.num = attr->out_rep[j]->vport; - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { - dest[i].vport.vhca_id = - MLX5_CAP_GEN(attr->out_mdev[j], vhca_id); - dest[i].vport.vhca_id_valid = 1; - } + dest[i].vport.vhca_id = + MLX5_CAP_GEN(attr->out_mdev[j], vhca_id); + dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); i++; } } @@ -121,8 +125,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) flow_act.encap_id = attr->encap_id; - rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.offloads.fast_fdb, - spec, &flow_act, dest, i); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i); if (IS_ERR(rule)) goto err_add_rule; else @@ -136,6 +139,57 @@ err_counter_alloc: return rule; } +struct mlx5_flow_handle * +mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr) +{ + struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; + struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_handle *rule; + void *misc; + int i; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + for (i = 0; i < attr->mirror_count; i++) { + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest[i].vport.num = attr->out_rep[i]->vport; + dest[i].vport.vhca_id = + MLX5_CAP_GEN(attr->out_mdev[i], vhca_id); + dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); + } + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = esw->fdb_table.offloads.fwd_fdb, + i++; + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); + + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(attr->in_mdev, vhca_id)); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + + if (attr->match_level == MLX5_MATCH_NONE) + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + else + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS; + + rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i); + + if (!IS_ERR(rule)) + esw->offloads.num_flows++; + + return rule; +} + void mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, -- cgit v1.2.3 From ddf385e31f574c1c47215b6b1cf53343d7d204d6 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 2 May 2018 18:30:56 +0300 Subject: net/mlx5e: Use WQ API functions instead of direct fields access Use the WQ API to get the WQ size, and to map a counter into a WQ entry index. 
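As a rough illustration of what these helpers hide, here is a small userspace sketch (assumed names and a toy ring size, not the mlx5 API): producer and consumer counters run freely and are reduced to ring indices with a power-of-two mask, which is exactly what the open-coded "ctr & wq->sz_m1" expressions did.

#include <stdint.h>
#include <stdio.h>

struct wq_cyc {
	uint16_t sz_m1;	/* ring size minus one; size is a power of two */
};

static unsigned int wq_cyc_get_size(const struct wq_cyc *wq)
{
	return (unsigned int)wq->sz_m1 + 1;
}

static unsigned int wq_cyc_ctr2ix(const struct wq_cyc *wq, uint16_t ctr)
{
	return ctr & wq->sz_m1;
}

/* free-slot check in the style of mlx5e_wqc_has_room_for() */
static int wq_has_room_for(const struct wq_cyc *wq, uint16_t cc, uint16_t pc,
			   uint16_t n)
{
	return wq_cyc_ctr2ix(wq, cc - pc) >= n || cc == pc;
}

int main(void)
{
	struct wq_cyc wq = { .sz_m1 = 8 - 1 };
	uint16_t pc = 0, cc = 0;

	pc += 6;	/* post 6 entries */
	cc += 3;	/* complete 3 of them */
	printf("size=%u pi=%u ci=%u room_for_4=%d\n",
	       wq_cyc_get_size(&wq), wq_cyc_ctr2ix(&wq, pc),
	       wq_cyc_ctr2ix(&wq, cc), wq_has_room_for(&wq, cc, pc, 4));
	return 0;
}

The mask trick is why the ring size must be a power of two: the free-running counters only ever touch the ring through the ctr2ix() mapping.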
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 9 +++--- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 37 ++++++++++++++--------- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 25 +++++++-------- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 8 ++--- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 19 ++++++++++-- 5 files changed, 60 insertions(+), 38 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c5c7a6d687ff..061c4e90692e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -450,7 +450,7 @@ struct mlx5e_icosq { static inline bool mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) { - return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc)); + return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); } struct mlx5e_dma_info { @@ -956,10 +956,9 @@ static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe **wqe, u16 *pi) { - struct mlx5_wq_cyc *wq; + struct mlx5_wq_cyc *wq = &sq->wq; - wq = &sq->wq; - *pi = sq->pc & wq->sz_m1; + *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); *wqe = mlx5_wq_cyc_get_wqe(wq, *pi); memset(*wqe, 0, sizeof(**wqe)); } @@ -967,7 +966,7 @@ static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, static inline struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) { - u16 pi = *pc & wq->sz_m1; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc); struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cee44c21766c..41f57afc5140 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -836,13 +836,15 @@ err_free_rq: static void mlx5e_activate_rq(struct mlx5e_rq *rq) { struct mlx5e_icosq *sq = &rq->channel->icosq; - u16 pi = sq->pc & sq->wq.sz_m1; + struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_tx_wqe *nopwqe; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl); + nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); } static void mlx5e_deactivate_rq(struct mlx5e_rq *rq) @@ -885,6 +887,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; int err; sq->pdev = c->pdev; @@ -894,10 +897,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->min_inline_mode = params->tx_min_inline_mode; param->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); if (err) return err; - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + wq->db = &wq->db[MLX5_SND_DBR]; err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -940,22 +943,23 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; int err; sq->channel = c; sq->uar_map = 
mdev->mlx5e_res.bfreg.map; param->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); if (err) return err; - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + wq->db = &wq->db[MLX5_SND_DBR]; err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; - sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS; + sq->edge = mlx5_wq_cyc_get_size(wq) - MLX5E_ICOSQ_MAX_WQEBBS; return 0; @@ -1005,6 +1009,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; int err; sq->pdev = c->pdev; @@ -1022,10 +1027,10 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, set_bit(MLX5E_SQ_STATE_TLS, &sq->state); param->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); if (err) return err; - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + wq->db = &wq->db[MLX5_SND_DBR]; err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -1034,7 +1039,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work); sq->dim.mode = params->tx_cq_moderation.cq_period_mode; - sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; + sq->edge = mlx5_wq_cyc_get_size(wq) - MLX5_SEND_WQE_MAX_WQEBBS; return 0; @@ -1238,6 +1243,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) { struct mlx5e_channel *c = sq->channel; + struct mlx5_wq_cyc *wq = &sq->wq; clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); /* prevent netif_tx_wake_queue */ @@ -1246,12 +1252,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) netif_tx_disable_queue(sq->txq); /* last doorbell out, godspeed .. 
*/ - if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) { + if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) { + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); struct mlx5e_tx_wqe *nop; - sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL; - nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl); + sq->db.wqe_info[pi].skb = NULL; + nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 53f72923b164..7fd3ec877ba4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -54,7 +54,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, void *data) { - u32 ci = cqcc & cq->wq.fbc.sz_m1; + u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc); memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64)); } @@ -76,10 +76,11 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) { - struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc; - u8 op_own = (cqcc >> fbc->log_sz) & 1; - u32 wq_sz = 1 << fbc->log_sz; - u32 ci = cqcc & fbc->sz_m1; + struct mlx5_cqwq *wq = &cq->wq; + + u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1; + u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); + u32 wq_sz = mlx5_cqwq_get_size(wq); u32 ci_top = min_t(u32, wq_sz, ci + n); for (; ci < ci_top; ci++, n--) { @@ -112,7 +113,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, mpwrq_get_cqe_consumed_strides(&cq->title); else cq->decmprs_wqe_counter = - (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1; + mlx5_wq_ll_ctr2ix(&rq->wq, cq->decmprs_wqe_counter + 1); } static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, @@ -395,7 +396,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) int i; /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { + while ((pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc)) > sq->edge) { sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; mlx5e_post_nop(wq, sq->sqn, &sq->pc); } @@ -482,7 +483,7 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *cqe) { struct mlx5_wq_cyc *wq = &sq->wq; - u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1; + u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci]; mlx5_cqwq_pop(&cq->wq); @@ -731,7 +732,7 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_tx_wqe *wqe; - u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */ + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ wqe = mlx5_wq_cyc_get_wqe(wq, pi); @@ -744,7 +745,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, { struct mlx5e_xdpsq *sq = &rq->xdpsq; struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = sq->pc & wq->sz_m1; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; @@ -1214,7 +1215,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) last_wqe = (sqcc == wqe_counter); - ci = sqcc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); di = &sq->db.di[ci]; sqcc++; @@ -1239,7 +1240,7 @@ void mlx5e_free_xdpsq_descs(struct 
mlx5e_xdpsq *sq) u16 ci; while (sq->cc != sq->pc) { - ci = sq->cc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); di = &sq->db.di[ci]; sq->cc++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 2d3f17da5f5c..c9864fcf00b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -331,7 +331,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { + while ((pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc)) > sq->edge) { sq->db.wqe_info[pi].skb = NULL; mlx5e_post_nop(wq, sq->sqn, &sq->pc); sq->stats.nop++; @@ -496,7 +496,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) last_wqe = (sqcc == wqe_counter); - ci = sqcc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wi = &sq->db.wqe_info[ci]; skb = wi->skb; @@ -559,7 +559,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) int i; while (sq->cc != sq->pc) { - ci = sq->cc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); wi = &sq->db.wqe_info[ci]; skb = wi->skb; @@ -606,7 +606,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey) { struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = sq->pc & wq->sz_m1; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index f3dfa0ca3c5d..a3572e148f09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -123,9 +123,14 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) return !equal && !smaller; } +static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) { - return wq->cc & wq->fbc.sz_m1; + return mlx5_cqwq_ctr2ix(wq, wq->cc); } static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) @@ -133,9 +138,14 @@ static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } +static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr) +{ + return ctr >> wq->fbc.log_sz; +} + static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) { - return wq->cc >> wq->fbc.log_sz; + return mlx5_cqwq_get_ctr_wrap_cnt(wq, wq->cc); } static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq) @@ -174,6 +184,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) return !wq->cur_sz; } +static inline u16 mlx5_wq_ll_ctr2ix(struct mlx5_wq_ll *wq, u16 ctr) +{ + return ctr & wq->sz_m1; +} + static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) { return wq->buf + (ix << wq->log_stride); -- cgit v1.2.3 From 043dc78ecf07f3fc5b87270518d7f322aea2f748 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 21 Mar 2018 16:31:08 +0200 Subject: net/mlx5e: TX, Use actual WQE size for SQ edge fill We fill SQ edge with NOPs to avoid WQEs wrap. Here, instead of doing that in advance for the maximum possible WQE size, we do it on-demand using the actual WQE size. We re-order some parts in mlx5e_sq_xmit to finish the calculation of WQE size (ds_cnt) before doing any writes to the WQE buffer. 
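As an aside, a simplified standalone sketch of that on-demand edge fill (assumed names and ring size, not the driver code) looks like this: the WQEBB count is derived from the actual ds_cnt, and NOP padding is emitted only when that many WQEBBs would wrap past the end of the ring.

#include <stdio.h>

#define WQEBB_NUM_DS	4	/* 4 x 16B data segments per 64B WQEBB */
#define WQ_SIZE		8	/* ring size in WQEBBs, power of two */

static unsigned int pc;		/* free-running producer counter */

static unsigned int ctr2ix(unsigned int ctr) { return ctr & (WQ_SIZE - 1); }

static void post_nop(void) { pc++; }	/* stand-in for mlx5e_post_nop() */

/* reserve num_wqebbs contiguous slots, padding the edge if needed */
static unsigned int reserve_wqe(unsigned int ds_cnt)
{
	unsigned int num_wqebbs = (ds_cnt + WQEBB_NUM_DS - 1) / WQEBB_NUM_DS;
	unsigned int pi = ctr2ix(pc);

	if (pi + num_wqebbs > WQ_SIZE) {
		unsigned int nnops = WQ_SIZE - pi;

		printf("padding %u NOP(s) at pi=%u\n", nnops, pi);
		while (nnops--)
			post_nop();
		pi = ctr2ix(pc);	/* back to index 0 */
	}
	pc += num_wqebbs;
	return pi;
}

int main(void)
{
	/* a 7-DS WQE (2 WQEBBs) posted near the ring end forces padding */
	pc = 7;
	printf("wqe starts at pi=%u\n", reserve_wqe(7));
	return 0;
}
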
When SQ work queue is fragmented (introduced in an downstream patch), dealing with WQE wraps becomes more frequent. This change would drastically reduce the overhead in this case. Performance tests: ConnectX-5 100Gbps, CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz Packet rate of 64B packets, single transmit ring, size 8K. Before: 14.9 Mpps After: 15.8 Mpps Improvement of 6%. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 - drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 27 ++- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 213 +++++++++++++-------- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 23 +++ 5 files changed, 178 insertions(+), 92 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 061c4e90692e..3c0f0a0343fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -183,6 +183,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; + struct mlx5_wqe_data_seg data[0]; }; struct mlx5e_rx_wqe { @@ -374,7 +375,6 @@ struct mlx5e_txqsq { struct netdev_queue *txq; u32 sqn; u8 min_inline_mode; - u16 edge; struct device *pdev; __be32 mkey_be; unsigned long state; @@ -439,7 +439,6 @@ struct mlx5e_icosq { struct mlx5_wq_cyc wq; void __iomem *uar_map; u32 sqn; - u16 edge; unsigned long state; /* control path */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 41f57afc5140..a8b1e43384ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -959,8 +959,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, if (err) goto err_sq_wq_destroy; - sq->edge = mlx5_wq_cyc_get_size(wq) - MLX5E_ICOSQ_MAX_WQEBBS; - return 0; err_sq_wq_destroy: @@ -1039,8 +1037,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work); sq->dim.mode = params->tx_cq_moderation.cq_period_mode; - sq->edge = mlx5_wq_cyc_get_size(wq) - MLX5_SEND_WQE_MAX_WQEBBS; - return 0; err_sq_wq_destroy: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 7fd3ec877ba4..f4d2c8886492 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -383,6 +383,22 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; } +static inline void mlx5e_fill_icosq_edge(struct mlx5e_icosq *sq, + struct mlx5_wq_cyc *wq, + u16 pi) +{ + struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi]; + u8 nnops = mlx5_wq_cyc_get_size(wq) - pi; + + edge_wi = wi + nnops; + + /* fill sq edge with nops to avoid wqe wrapping two pages */ + for (; wi < edge_wi; wi++) { + wi->opcode = MLX5_OPCODE_NOP; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } +} + static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; @@ -391,14 +407,15 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); - int err; u16 pi; + int err; int i; - /* fill sq edge with nops 
to avoid wqe wrap around */ - while ((pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc)) > sq->edge) { - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - mlx5e_post_nop(wq, sq->sqn, &sq->pc); + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + + if (unlikely(pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_size(wq))) { + mlx5e_fill_icosq_edge(sq, wq, pi); + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); } umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index c9864fcf00b9..fc68e72b0b2b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -230,13 +230,10 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct } static inline u16 -mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes) +mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) { u16 ihs; - eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); - if (skb->encapsulation) { ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); sq->stats.tso_inner_packets++; @@ -247,7 +244,6 @@ mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb, sq->stats.tso_bytes += skb->len - ihs; } - *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; return ihs; } @@ -300,17 +296,34 @@ dma_unmap_wqe_err: return -ENOMEM; } +static inline void mlx5e_fill_sq_edge(struct mlx5e_txqsq *sq, + struct mlx5_wq_cyc *wq, + u16 pi) +{ + struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; + u8 nnops = mlx5_wq_cyc_get_size(wq) - pi; + + edge_wi = wi + nnops; + + /* fill sq edge with nops to avoid wqe wrap around */ + for (; wi < edge_wi; wi++) { + wi->skb = NULL; + wi->num_wqebbs = 1; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } + sq->stats.nop += nnops; +} + static inline void mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, - u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma, + u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma, struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg) { struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi; wi->num_bytes = num_bytes; wi->num_dma = num_dma; - wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + wi->num_wqebbs = num_wqebbs; wi->skb = skb; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); @@ -329,58 +342,85 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); - - /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc)) > sq->edge) { - sq->db.wqe_info[pi].skb = NULL; - mlx5e_post_nop(wq, sq->sqn, &sq->pc); - sq->stats.nop++; - } } +#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) + netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe *wqe, u16 pi) { - struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; - - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5_wqe_eth_seg *eseg; + struct mlx5_wqe_data_seg *dseg; + struct mlx5e_tx_wqe_info *wi; unsigned char *skb_data = skb->data; unsigned int skb_len = skb->len; - u8 opcode = MLX5_OPCODE_SEND; - unsigned int num_bytes; + u16 ds_cnt, ds_cnt_inl = 0; + u8 num_wqebbs, opcode; + u16 headlen, ihs; + u32 
num_bytes; int num_dma; - u16 headlen; - u16 ds_cnt; - u16 ihs; - - mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + __be16 mss; + /* Calc ihs and ds cnt, no writes to wqe yet */ + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; if (skb_is_gso(skb)) { - opcode = MLX5_OPCODE_LSO; - ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes); + opcode = MLX5_OPCODE_LSO; + mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + ihs = mlx5e_tx_get_gso_ihs(sq, skb); + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; sq->stats.packets += skb_shinfo(skb)->gso_segs; } else { - ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + opcode = MLX5_OPCODE_SEND; + mss = 0; + ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); sq->stats.packets++; } - sq->stats.bytes += num_bytes; + + sq->stats.bytes += num_bytes; sq->stats.xmit_more += skb->xmit_more; - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + headlen = skb_len - ihs - skb->data_len; + ds_cnt += !!headlen; + ds_cnt += skb_shinfo(skb)->nr_frags; + + if (ihs) { + ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN; + + ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS); + ds_cnt += ds_cnt_inl; + } + + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + if (unlikely(pi + num_wqebbs > mlx5_wq_cyc_get_size(wq))) { + mlx5e_fill_sq_edge(sq, wq, pi); + mlx5e_sq_fetch_wqe(sq, &wqe, &pi); + } + + /* fill wqe */ + wi = &sq->db.wqe_info[pi]; + cseg = &wqe->ctrl; + eseg = &wqe->eth; + dseg = wqe->data; + + mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + + eseg->mss = mss; + if (ihs) { if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); - ihs += VLAN_HLEN; + mlx5e_insert_vlan(eseg->inline_hdr.start, skb, + ihs - VLAN_HLEN, &skb_data, &skb_len); sq->stats.added_vlan_packets++; } else { memcpy(eseg->inline_hdr.start, skb_data, ihs); mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); } eseg->inline_hdr.sz = cpu_to_be16(ihs); - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); + dseg += ds_cnt_inl; } else if (skb_vlan_tag_present(skb)) { eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD)) @@ -389,14 +429,12 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, sq->stats.added_vlan_packets++; } - headlen = skb_len - skb->data_len; - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, - (struct mlx5_wqe_data_seg *)cseg + ds_cnt); + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; - mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, - num_bytes, num_dma, wi, cseg); + mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, + num_dma, wi, cseg); return NETDEV_TX_OK; @@ -581,18 +619,6 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) } #ifdef CONFIG_MLX5_CORE_IPOIB - -struct mlx5_wqe_eth_pad { - u8 rsvd0[16]; -}; - -struct mlx5i_tx_wqe { - struct mlx5_wqe_ctrl_seg ctrl; - struct mlx5_wqe_datagram_seg datagram; - struct mlx5_wqe_eth_pad pad; - struct mlx5_wqe_eth_seg eth; -}; - static inline void mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey, struct mlx5_wqe_datagram_seg *dseg) @@ -605,59 +631,85 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey, netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey) { - struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = 
mlx5_wq_cyc_ctr2ix(wq, sq->pc); - struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5i_tx_wqe *wqe; - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wqe_datagram_seg *datagram; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5_wqe_eth_seg *eseg; + struct mlx5_wqe_data_seg *dseg; + struct mlx5e_tx_wqe_info *wi; unsigned char *skb_data = skb->data; unsigned int skb_len = skb->len; - u8 opcode = MLX5_OPCODE_SEND; - unsigned int num_bytes; + u16 ds_cnt, ds_cnt_inl = 0; + u8 num_wqebbs, opcode; + u16 headlen, ihs, pi; + u32 num_bytes; int num_dma; - u16 headlen; - u16 ds_cnt; - u16 ihs; - - memset(wqe, 0, sizeof(*wqe)); + __be16 mss; - mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram); - - mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + mlx5i_sq_fetch_wqe(sq, &wqe, &pi); + /* Calc ihs and ds cnt, no writes to wqe yet */ + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; if (skb_is_gso(skb)) { - opcode = MLX5_OPCODE_LSO; - ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes); + opcode = MLX5_OPCODE_LSO; + mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + ihs = mlx5e_tx_get_gso_ihs(sq, skb); + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; sq->stats.packets += skb_shinfo(skb)->gso_segs; } else { - ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + opcode = MLX5_OPCODE_SEND; + mss = 0; + ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); sq->stats.packets++; } - sq->stats.bytes += num_bytes; + sq->stats.bytes += num_bytes; sq->stats.xmit_more += skb->xmit_more; - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + headlen = skb_len - ihs - skb->data_len; + ds_cnt += !!headlen; + ds_cnt += skb_shinfo(skb)->nr_frags; + + if (ihs) { + ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS); + ds_cnt += ds_cnt_inl; + } + + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + if (unlikely(pi + num_wqebbs > mlx5_wq_cyc_get_size(wq))) { + mlx5e_fill_sq_edge(sq, wq, pi); + mlx5i_sq_fetch_wqe(sq, &wqe, &pi); + } + + /* fill wqe */ + wi = &sq->db.wqe_info[pi]; + cseg = &wqe->ctrl; + datagram = &wqe->datagram; + eseg = &wqe->eth; + dseg = wqe->data; + + mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram); + + mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + + eseg->mss = mss; + if (ihs) { memcpy(eseg->inline_hdr.start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); eseg->inline_hdr.sz = cpu_to_be16(ihs); - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); + dseg += ds_cnt_inl; } - headlen = skb_len - skb->data_len; - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, - (struct mlx5_wqe_data_seg *)cseg + ds_cnt); + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; - mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, - num_bytes, num_dma, wi, cseg); + mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, + num_dma, wi, cseg); return NETDEV_TX_OK; @@ -667,5 +719,4 @@ err_drop: return NETDEV_TX_OK; } - #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 6d9053bcbe95..45a11864e544 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -93,6 +93,29 @@ const struct mlx5e_profile *mlx5i_pkey_get_profile(void); /* Extract mlx5e_priv from IPoIB netdev */ #define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv)) +struct mlx5_wqe_eth_pad { + u8 rsvd0[16]; +}; + +struct mlx5i_tx_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_datagram_seg datagram; + struct mlx5_wqe_eth_pad pad; + struct mlx5_wqe_eth_seg eth; + struct mlx5_wqe_data_seg data[0]; +}; + +static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, + struct mlx5i_tx_wqe **wqe, + u16 *pi) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + *wqe = mlx5_wq_cyc_get_wqe(wq, *pi); + memset(*wqe, 0, sizeof(**wqe)); +} + netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey); void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); -- cgit v1.2.3 From 549322f2f988b2ced90656727c0b4850e4b49fc6 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 9 May 2018 13:46:51 +0300 Subject: net/mlx5i: Use compilation flag in IPOIB header If CONFIG_MLX5_CORE_IPOIB is not set, compile-out the IPOIB related headers. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 45a11864e544..08eac92fc26c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -33,6 +33,8 @@ #ifndef __MLX5E_IPOB_H__ #define __MLX5E_IPOB_H__ +#ifdef CONFIG_MLX5_CORE_IPOIB + #include #include "en.h" @@ -120,4 +122,5 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey); void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +#endif /* CONFIG_MLX5_CORE_IPOIB */ #endif /* __MLX5E_IPOB_H__ */ -- cgit v1.2.3 From 3a2f70331226c140e5aa27ee6bbe2a5c618acb4c Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 4 Apr 2018 12:54:23 +0300 Subject: net/mlx5: Use order-0 allocations for all WQ types Complete the transition of all WQ types to use fragmented order-0 coherent memory instead of high-order allocations. CQ-WQ already uses order-0. Here we do the same for cyclic and linked-list WQs. This allows the driver to load cleanly on systems with a highly fragmented coherent memory. Performance tests: ConnectX-5 100Gbps, CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz Packet rate of 64B packets, single transmit ring, size 8K. No degradation is sensed. 
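For intuition, here is a standalone sketch (field names loosely modeled on the frag-buf control structure, page size and strides assumed) of how a fragmented ring translates a WQE index into a fragment plus an offset, so that no single coherent allocation has to exceed one page:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096u
#define LOG_STRIDE	6u		/* 64-byte WQE stride */
#define LOG_WQ_SZ	9u		/* 512 entries -> 32 KB of ring space */

struct frag_buf_ctrl {
	void    **frags;		/* one pointer per order-0 fragment */
	uint32_t  log_stride;
	uint32_t  log_frag_strides;	/* strides that fit in one fragment */
	uint32_t  frag_sz_m1;
	uint32_t  sz_m1;
};

static void *fbc_get_wqe(struct frag_buf_ctrl *fbc, uint32_t ix)
{
	uint32_t frag = ix >> fbc->log_frag_strides;
	uint32_t off  = (ix & fbc->frag_sz_m1) << fbc->log_stride;

	return (char *)fbc->frags[frag] + off;
}

int main(void)
{
	struct frag_buf_ctrl fbc = {
		.log_stride = LOG_STRIDE,
		.log_frag_strides = 12 - LOG_STRIDE,	/* PAGE_SHIFT - log_stride */
	};
	uint32_t nfrags, i;

	fbc.frag_sz_m1 = (1u << fbc.log_frag_strides) - 1;
	fbc.sz_m1 = (1u << LOG_WQ_SZ) - 1;
	nfrags = (fbc.sz_m1 + 1) >> fbc.log_frag_strides;

	fbc.frags = calloc(nfrags, sizeof(*fbc.frags));
	for (i = 0; i < nfrags; i++)
		fbc.frags[i] = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	/* entry 70 lands in fragment 1 (64 strides per 4 KB fragment) */
	printf("nfrags=%u wqe70=%p frag1=%p\n",
	       nfrags, fbc_get_wqe(&fbc, 70), fbc.frags[1]);

	for (i = 0; i < nfrags; i++)
		free(fbc.frags[i]);
	free(fbc.frags);
	return 0;
}

Because each fragment is an independent order-0 allocation, the ring can be sized freely without ever asking the allocator for physically contiguous multi-page memory.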
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 15 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 17 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 24 +++--- .../net/ethernet/mellanox/mlx5/core/fpga/conn.c | 14 ++-- .../net/ethernet/mellanox/mlx5/core/fpga/conn.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 94 +++++++++++++--------- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 33 ++++---- include/linux/mlx5/driver.h | 16 +++- 9 files changed, 123 insertions(+), 94 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 3c0f0a0343fd..9396db54973f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -314,7 +314,7 @@ struct mlx5e_cq { /* control */ struct mlx5_core_dev *mdev; - struct mlx5_frag_wq_ctrl wq_ctrl; + struct mlx5_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; struct mlx5e_tx_wqe_info { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a8b1e43384ca..0c167e5fc346 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -646,8 +646,8 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); - mlx5_fill_page_array(&rq->wq_ctrl.buf, - (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + mlx5_fill_page_frag_array(&rq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); @@ -1096,7 +1096,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma); - mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + mlx5_fill_page_frag_array(&csp->wq_ctrl->buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_sq(mdev, in, inlen, sqn); @@ -1538,7 +1539,7 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c, static void mlx5e_free_cq(struct mlx5e_cq *cq) { - mlx5_cqwq_destroy(&cq->wq_ctrl); + mlx5_wq_destroy(&cq->wq_ctrl); } static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) @@ -1554,7 +1555,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) int err; inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - sizeof(u64) * cq->wq_ctrl.frag_buf.npages; + sizeof(u64) * cq->wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1563,7 +1564,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) memcpy(cqc, param->cqc, sizeof(param->cqc)); - mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf, + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); @@ -1571,7 +1572,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); - MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift - + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index f4d2c8886492..ac54380d41e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -383,16 +383,16 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; } -static inline void mlx5e_fill_icosq_edge(struct mlx5e_icosq *sq, - struct mlx5_wq_cyc *wq, - u16 pi) +static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, + struct mlx5_wq_cyc *wq, + u16 pi, u16 frag_pi) { struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi]; - u8 nnops = mlx5_wq_cyc_get_size(wq) - pi; + u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; edge_wi = wi + nnops; - /* fill sq edge with nops to avoid wqe wrapping two pages */ + /* fill sq frag edge with nops to avoid wqe wrapping two pages */ for (; wi < edge_wi; wi++) { wi->opcode = MLX5_OPCODE_NOP; mlx5e_post_nop(wq, sq->sqn, &sq->pc); @@ -407,14 +407,15 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); - u16 pi; + u16 pi, frag_pi; int err; int i; pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); - if (unlikely(pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_size(wq))) { - mlx5e_fill_icosq_edge(sq, wq, pi); + if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) { + mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi); pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index fc68e72b0b2b..d37566be06e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -296,16 +296,16 @@ dma_unmap_wqe_err: return -ENOMEM; } -static inline void mlx5e_fill_sq_edge(struct mlx5e_txqsq *sq, - struct mlx5_wq_cyc *wq, - u16 pi) +static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, + struct mlx5_wq_cyc *wq, + u16 pi, u16 frag_pi) { struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; - u8 nnops = mlx5_wq_cyc_get_size(wq) - pi; + u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; edge_wi = wi + nnops; - /* fill sq edge with nops to avoid wqe wrap around */ + /* fill sq frag edge with nops to avoid wqe wrapping two pages */ for (; wi < edge_wi; wi++) { wi->skb = NULL; wi->num_wqebbs = 1; @@ -358,8 +358,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, unsigned char *skb_data = skb->data; unsigned int skb_len = skb->len; u16 ds_cnt, ds_cnt_inl = 0; + u16 headlen, ihs, frag_pi; u8 num_wqebbs, opcode; - u16 headlen, ihs; u32 num_bytes; int num_dma; __be16 mss; @@ -395,8 +395,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, } num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); - if (unlikely(pi + num_wqebbs > mlx5_wq_cyc_get_size(wq))) { - mlx5e_fill_sq_edge(sq, wq, pi); + frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); + if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { + mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); mlx5e_sq_fetch_wqe(sq, &wqe, &pi); } @@ -642,9 +643,9 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, unsigned char *skb_data = skb->data; unsigned int skb_len = skb->len; + u16 headlen, ihs, pi, frag_pi; u16 ds_cnt, ds_cnt_inl = 0; u8 num_wqebbs, opcode; - u16 headlen, ihs, pi; u32 num_bytes; int num_dma; __be16 mss; @@ 
-680,8 +681,9 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, } num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); - if (unlikely(pi + num_wqebbs > mlx5_wq_cyc_get_size(wq))) { - mlx5e_fill_sq_edge(sq, wq, pi); + frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); + if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { + mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); mlx5i_sq_fetch_wqe(sq, &wqe, &pi); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index de7fe087d6fe..4e5a5cf25f17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -454,7 +454,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) } inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages; + sizeof(u64) * conn->cq.wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; @@ -469,12 +469,12 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index); - MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift - + MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma); pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); - mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas); + mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas); err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen); kvfree(in); @@ -500,7 +500,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) goto out; err_cqwq: - mlx5_cqwq_destroy(&conn->cq.wq_ctrl); + mlx5_wq_destroy(&conn->cq.wq_ctrl); out: return err; } @@ -510,7 +510,7 @@ static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn) tasklet_disable(&conn->cq.tasklet); tasklet_kill(&conn->cq.tasklet); mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq); - mlx5_cqwq_destroy(&conn->cq.wq_ctrl); + mlx5_wq_destroy(&conn->cq.wq_ctrl); } static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc) @@ -591,8 +591,8 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); - mlx5_fill_page_array(&conn->qp.wq_ctrl.buf, - (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); + mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h index 44bd9eccc711..634ae10e287b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h @@ -54,7 +54,7 @@ struct mlx5_fpga_conn { /* CQ */ struct { struct mlx5_cqwq wq; - struct mlx5_frag_wq_ctrl wq_ctrl; + struct mlx5_wq_ctrl wq_ctrl; struct mlx5_core_cq mcq; struct tasklet_struct tasklet; } cq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index ea66448ba365..5b8b35392025 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -36,7 +36,12 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) { - 
return (u32)wq->sz_m1 + 1; + return (u32)wq->fbc.sz_m1 + 1; +} + +u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) +{ + return (u32)wq->fbc.frag_sz_m1 + 1; } u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) @@ -46,12 +51,12 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) { - return (u32)wq->sz_m1 + 1; + return (u32)wq->fbc.sz_m1 + 1; } static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq) { - return mlx5_wq_cyc_get_size(wq) << wq->log_stride; + return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride; } static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq) @@ -67,17 +72,19 @@ static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq) { - return mlx5_wq_ll_get_size(wq) << wq->log_stride; + return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride; } int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl) { + struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; int err; - wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); - wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride), + MLX5_GET(wq, wqc, log_wq_sz), + fbc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -85,14 +92,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->buf = wq_ctrl->buf.frags->buf; + fbc->frag_buf = wq_ctrl->buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -105,17 +112,35 @@ err_db_free: return err; } +static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf, + struct mlx5_wq_qp *qp) +{ + struct mlx5_frag_buf *rqb, *sqb; + + rqb = &qp->rq.fbc.frag_buf; + *rqb = *buf; + rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); + rqb->npages = 1 << get_order(rqb->size); + + sqb = &qp->sq.fbc.frag_buf; + *sqb = *buf; + sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); + sqb->npages = 1 << get_order(sqb->size); + sqb->frags += rqb->npages; /* first part is for the rq */ +} + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) { int err; - wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4; - wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1; - - wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB); - wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1; + mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, + MLX5_GET(qpc, qpc, log_rq_size), + &wq->rq.fbc); + mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB), + MLX5_GET(qpc, qpc, log_sq_size), + &wq->sq.fbc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -123,15 +148,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + 
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->rq.buf = wq_ctrl->buf.frags->buf; - wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq); + mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq); + wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR]; @@ -147,7 +172,7 @@ err_db_free: int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, - struct mlx5_frag_wq_ctrl *wq_ctrl) + struct mlx5_wq_ctrl *wq_ctrl) { int err; @@ -160,7 +185,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, } err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), - &wq_ctrl->frag_buf, + &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", @@ -168,7 +193,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->fbc.frag_buf = wq_ctrl->frag_buf; + wq->fbc.frag_buf = wq_ctrl->buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -185,12 +210,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl) { + struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; struct mlx5_wqe_srq_next_seg *next_seg; int err; int i; - wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); - wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride), + MLX5_GET(wq, wqc, log_wq_sz), + fbc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -198,17 +225,17 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->buf = wq_ctrl->buf.frags->buf; + wq->fbc.frag_buf = wq_ctrl->buf; wq->db = wq_ctrl->db.db; - for (i = 0; i < wq->sz_m1; i++) { + for (i = 0; i < fbc->sz_m1; i++) { next_seg = mlx5_wq_ll_get_wqe(wq, i); next_seg->next_wqe_index = cpu_to_be16(i + 1); } @@ -227,12 +254,7 @@ err_db_free: void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) { - mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); + mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); } -void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl) -{ - mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf); - mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index a3572e148f09..b9d7c01fc7cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -48,17 +48,9 @@ struct mlx5_wq_ctrl { struct mlx5_db db; }; -struct mlx5_frag_wq_ctrl { - struct mlx5_core_dev *mdev; - struct mlx5_frag_buf frag_buf; - struct mlx5_db db; -}; - struct mlx5_wq_cyc { - void *buf; + struct mlx5_frag_buf_ctrl fbc; __be32 *db; - u16 sz_m1; - u8 log_stride; }; struct mlx5_wq_qp { @@ -73,20 +65,19 @@ struct mlx5_cqwq { }; struct mlx5_wq_ll { - void *buf; + struct mlx5_frag_buf_ctrl fbc; __be32 *db; __be16 *tail_next; - u16 sz_m1; u16 head; u16 wqe_ctr; u16 cur_sz; - u8 log_stride; 
}; int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); +u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, @@ -94,7 +85,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, - struct mlx5_frag_wq_ctrl *wq_ctrl); + struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, @@ -103,16 +94,20 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); -void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl); static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) { - return ctr & wq->sz_m1; + return ctr & wq->fbc.sz_m1; +} + +static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->fbc.frag_sz_m1; } static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) { - return wq->buf + (ix << wq->log_stride); + return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) @@ -176,7 +171,7 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq) static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) { - return wq->cur_sz == wq->sz_m1; + return wq->cur_sz == wq->fbc.sz_m1; } static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) @@ -186,12 +181,12 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) static inline u16 mlx5_wq_ll_ctr2ix(struct mlx5_wq_ll *wq, u16 ctr) { - return ctr & wq->sz_m1; + return ctr & wq->fbc.sz_m1; } static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) { - return wq->buf + (ix << wq->log_stride); + return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 92d292454351..80cbb7fdce4a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -983,16 +983,24 @@ static inline u32 mlx5_base_mkey(const u32 key) return key & 0xffffff00u; } -static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, - void *cqc) +static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz, + struct mlx5_frag_buf_ctrl *fbc) { - fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); - fbc->log_sz = MLX5_GET(cqc, cqc, log_cq_size); + fbc->log_stride = log_stride; + fbc->log_sz = log_sz; fbc->sz_m1 = (1 << fbc->log_sz) - 1; fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; } +static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, + void *cqc) +{ + mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz), + MLX5_GET(cqc, cqc, log_cq_size), + fbc); +} + static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, u32 ix) { -- cgit v1.2.3 From 6ab75516cf35c4789ca8baa206b21fdbc03c7870 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Fri, 2 Mar 2018 22:21:32 -0800 Subject: net/mlx5e: Move phy link down events counter out of SW stats PHY link down events counter belongs to phy_counters 
group. although it has special handling, it doesn't mean it can't be there. Move it to phy_counters_grp handler. Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 37 +++++++++++++--------- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 3 -- 2 files changed, 22 insertions(+), 18 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index e17919c0af08..973939ed8bb5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -81,7 +81,6 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) }, }; #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) @@ -175,9 +174,6 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) } } - s->link_down_events_phy = MLX5_GET(ppcnt_reg, - priv->stats.pport.phy_counters, - counter_set.phys_layer_cntrs.link_down_events); memcpy(&priv->stats.sw, s, sizeof(*s)); } @@ -580,12 +576,13 @@ static const struct counter_desc pport_phy_statistical_stats_desc[] = { { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, }; -#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc) +#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc) static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv) { + /* "1" for link_down_events special counter */ return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ? 
- NUM_PPORT_PHY_COUNTERS : 0; + NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1; } static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, @@ -593,10 +590,14 @@ static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, { int i; - if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) - for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_phy_statistical_stats_desc[i].format); + strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy"); + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + return idx; + + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_stats_desc[i].format); return idx; } @@ -604,11 +605,17 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) { int i; - if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) - for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, - pport_phy_statistical_stats_desc, i); + /* link_down_events_phy has special handling since it is not stored in __be64 format */ + data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, + counter_set.phys_layer_cntrs.link_down_events); + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + return idx; + + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_stats_desc, i); return idx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a36e6a87066b..39ced559929a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -97,9 +97,6 @@ struct mlx5e_sw_stats { u64 tx_tls_ooo; u64 tx_tls_resync_bytes; #endif - - /* Special handling counters */ - u64 link_down_events_phy; }; struct mlx5e_qcounter_stats { -- cgit v1.2.3 From 868a01a27d80a59e719f0c369d1b26b923fc7674 Mon Sep 17 00:00:00 2001 From: Shalom Lagziel Date: Mon, 12 Feb 2018 17:43:07 +0200 Subject: net/mlx5e: Introducing new statistics rwlock Introduce a new read/write lock that will protect statistics gathering from netdev channels configuration changes. e.g. when channels are being replaced (increase/decrease number of rings) prevent statistic gathering (ndo_get_stats64) to read the statistics of in-active channels (channels that are being closed). Plus update channels software statistics on the fly when calling ndo_get_stats64, and remove it from stats periodic work. 
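The scheme boils down to a writer/reader split around channel activation. A minimal sketch of that pattern, using simplified names that do not match the driver exactly (the real fields and call sites are in the hunks below):

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_priv {
        rwlock_t stats_lock;    /* protects SW stats vs. channel teardown */
        bool     channels_active;
};

/* channel activate/deactivate paths are the writers */
static void example_set_channels_active(struct example_priv *priv, bool active)
{
        write_lock(&priv->stats_lock);
        priv->channels_active = active;
        write_unlock(&priv->stats_lock);
}

/* ndo_get_stats64 and other stat readers take the read side and bail
 * out while channels are being replaced, so they never touch rings
 * that are in the middle of being closed
 */
static void example_update_sw_stats(struct example_priv *priv)
{
        read_lock(&priv->stats_lock);
        if (!priv->channels_active)
                goto out;
        /* ... accumulate per-channel RQ/SQ counters here ... */
out:
        read_unlock(&priv->stats_lock);
}
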
Fixes: 9218b44dcc05 ("net/mlx5e: Statistics handling refactoring") Signed-off-by: Shalom Lagziel Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 ++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 16 +++++++++------- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 8 ++++++-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 ++ 5 files changed, 27 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 9396db54973f..c3c79f2835d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -771,6 +771,8 @@ struct mlx5e_priv { struct mutex state_lock; /* Protects Interface state */ struct mlx5e_rq drop_rq; + rwlock_t stats_lock; /* Protects channels SW stats updates */ + bool channels_active; struct mlx5e_channels channels; u32 tisn[MLX5E_MAX_NUM_TC]; struct mlx5e_rqt indir_rqt; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0c167e5fc346..0e9c64580abb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2658,6 +2658,9 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_build_channels_tx_maps(priv); mlx5e_activate_channels(&priv->channels); + write_lock(&priv->stats_lock); + priv->channels_active = true; + write_unlock(&priv->stats_lock); netif_tx_start_all_queues(priv->netdev); if (MLX5_VPORT_MANAGER(priv->mdev)) @@ -2679,6 +2682,9 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) */ netif_tx_stop_all_queues(priv->netdev); netif_tx_disable(priv->netdev); + write_lock(&priv->stats_lock); + priv->channels_active = false; + write_unlock(&priv->stats_lock); mlx5e_deactivate_channels(&priv->channels); } @@ -3223,6 +3229,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); } else { + mlx5e_grp_sw_update_stats(priv); stats->rx_packets = sstats->rx_packets; stats->rx_bytes = sstats->rx_bytes; stats->tx_packets = sstats->tx_packets; @@ -4248,6 +4255,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, profile->max_nch(mdev), netdev->mtu); mutex_init(&priv->state_lock); + rwlock_init(&priv->stats_lock); INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index c3034f58aa33..1a3f9e091385 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -130,6 +130,10 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) struct mlx5e_sq_stats *sq_stats; int i, j; + read_lock(&priv->stats_lock); + if (!priv->channels_active) + goto out; + memset(s, 0, sizeof(*s)); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -146,12 +150,8 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) s->tx_bytes += sq_stats->bytes; } } -} - -static void mlx5e_rep_update_stats(struct mlx5e_priv *priv) -{ - mlx5e_rep_update_sw_counters(priv); - mlx5e_rep_update_hw_counters(priv); +out: + 
read_unlock(&priv->stats_lock); } static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, @@ -871,6 +871,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_sw_stats *sstats = &priv->stats.sw; + mlx5e_rep_update_sw_counters(priv); + stats->rx_packets = sstats->rx_packets; stats->rx_bytes = sstats->rx_bytes; stats->tx_packets = sstats->tx_packets; @@ -1046,7 +1048,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .cleanup_rx = mlx5e_cleanup_rep_rx, .init_tx = mlx5e_init_rep_tx, .cleanup_tx = mlx5e_cleanup_nic_tx, - .update_stats = mlx5e_rep_update_stats, + .update_stats = mlx5e_rep_update_hw_counters, .max_nch = mlx5e_get_rep_max_num_channels, .update_carrier = NULL, .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 973939ed8bb5..323f2af4200b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -108,7 +108,7 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } -static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) +void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) { struct mlx5e_sw_stats temp, *s = &temp; struct mlx5e_rq_stats *rq_stats; @@ -117,6 +117,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) int i, j; memset(s, 0, sizeof(*s)); + read_lock(&priv->stats_lock); + if (!priv->channels_active) + goto out; for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -175,6 +178,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) } memcpy(&priv->stats.sw, s, sizeof(*s)); +out: + read_unlock(&priv->stats_lock); } static const struct counter_desc q_stats_desc[] = { @@ -1224,7 +1229,6 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .get_num_stats = mlx5e_grp_sw_get_num_stats, .fill_strings = mlx5e_grp_sw_fill_strings, .fill_stats = mlx5e_grp_sw_fill_stats, - .update_stats_mask = MLX5E_NDO_UPDATE_STATS, .update_stats = mlx5e_grp_sw_update_stats, }, { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 39ced559929a..390c7afa5188 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -239,4 +239,6 @@ struct mlx5e_stats_grp { extern const struct mlx5e_stats_grp mlx5e_stats_grps[]; extern const int mlx5e_num_stats_grps; +void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv); + #endif /* __MLX5_EN_STATS_H__ */ -- cgit v1.2.3 From 05909babce5328f468f7ac3a1033431c895f97a5 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Thu, 12 Apr 2018 16:03:37 +0300 Subject: net/mlx5e: Avoid reset netdev stats on configuration changes Move all RQ, SQ and channel counters from the channel objects into the priv structure. With this change, counters will not be reset upon channel configuration changes. Channel's statistics for SQs which are associated with TCs higher than zero will be presented in ethtool -S, only for SQs which were opened at least once since the module was loaded (regardless of their open/close current status). This is done in order to decrease the total amount of statistics presented and calculated for the common out of box use (no QoS). 
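A rough sketch of the resulting layout, with simplified names and placeholder sizes rather than the exact driver definitions (the real struct and the hook-up of each ring to its slot are in the hunks below):

#include <linux/cache.h>
#include <linux/types.h>

#define EX_MAX_TC       8
#define EX_MAX_CH       64

struct ex_rq_stats { u64 packets; u64 bytes; };
struct ex_sq_stats { u64 packets; u64 bytes; };
struct ex_ch_stats { u64 eq_rearm; };

struct ex_channel_stats {
        struct ex_ch_stats ch;
        struct ex_sq_stats sq[EX_MAX_TC];
        struct ex_rq_stats rq;
} ____cacheline_aligned_in_smp;   /* keep channels on separate cache lines */

struct ex_priv {
        /* owned by the priv object, not the channel: survives ring reopen */
        struct ex_channel_stats channel_stats[EX_MAX_CH];
        u8 max_opened_tc;       /* highest TC ever opened, bounds ethtool -S */
};

struct ex_rq {
        struct ex_rq_stats *stats;
};

/* a ring that is (re)opened simply points back at its persistent slot */
static void ex_attach_rq_stats(struct ex_priv *priv, struct ex_rq *rq, int ix)
{
        rq->stats = &priv->channel_stats[ix].rq;
}
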
mlx5e_channel_stats is a compound of CH,RQ,SQs stats in order to create locality for the NAPI when handling TX and RX of the same channel. Align the new statistics struct per ring to avoid several channels update to the same cache line at the same time. Packet rate was tested, no degradation sensed. Signed-off-by: Eran Ben Elisha CC: Qing Huang Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 14 +++- .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 4 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 28 +++++--- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 75 ++++++++++++---------- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 56 ++++++++-------- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 49 +++++++------- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 6 +- 8 files changed, 136 insertions(+), 100 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c3c79f2835d2..1c04df043e07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -358,7 +358,6 @@ struct mlx5e_txqsq { /* dirtied @xmit */ u16 pc ____cacheline_aligned_in_smp; u32 dma_fifo_pc; - struct mlx5e_sq_stats stats; struct mlx5e_cq cq; @@ -371,6 +370,7 @@ struct mlx5e_txqsq { /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; + struct mlx5e_sq_stats *stats; void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; @@ -526,7 +526,7 @@ struct mlx5e_rq { struct mlx5e_channel *channel; struct device *pdev; struct net_device *netdev; - struct mlx5e_rq_stats stats; + struct mlx5e_rq_stats *stats; struct mlx5e_cq cq; struct mlx5e_page_cache page_cache; struct hwtstamp_config *tstamp; @@ -574,7 +574,7 @@ struct mlx5e_channel { /* data path - accessed per napi poll */ struct irq_desc *irq_desc; - struct mlx5e_ch_stats stats; + struct mlx5e_ch_stats *stats; /* control */ struct mlx5e_priv *priv; @@ -590,6 +590,12 @@ struct mlx5e_channels { struct mlx5e_params params; }; +struct mlx5e_channel_stats { + struct mlx5e_ch_stats ch; + struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_rq_stats rq; +} ____cacheline_aligned_in_smp; + enum mlx5e_traffic_types { MLX5E_TT_IPV4_TCP, MLX5E_TT_IPV6_TCP, @@ -793,6 +799,8 @@ struct mlx5e_priv { struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; + struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; + u8 max_opened_tc; struct hwtstamp_config tstamp; u16 q_counter; u16 drop_rq_q_counter; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index ad2790fb5966..15aef71d1957 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -174,7 +174,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, int headln; int i; - sq->stats.tls_ooo++; + sq->stats->tls_ooo++; if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) { /* We might get here if a retransmission reaches the driver @@ -220,7 +220,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, skb_shinfo(nskb)->nr_frags = info.nr_frags; nskb->data_len = info.sync_len; nskb->len += info.sync_len; - sq->stats.tls_resync_bytes += nskb->len; + sq->stats->tls_resync_bytes += nskb->len; mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln, 
cpu_to_be64(info.rcd_sn)); mlx5e_sq_xmit(sq, nskb, *wqe, *pi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0e9c64580abb..9b19863b059d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -423,6 +423,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->ix = c->ix; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + rq->stats = &c->priv->channel_stats[c->ix].rq; rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; if (IS_ERR(rq->xdp_prog)) { @@ -1003,7 +1004,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq) + struct mlx5e_txqsq *sq, + int tc) { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; @@ -1018,6 +1020,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; + sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); @@ -1176,13 +1179,14 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq) + struct mlx5e_txqsq *sq, + int tc) { struct mlx5e_create_sq_param csp = {}; u32 tx_rate; int err; - err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq); + err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc); if (err) return err; @@ -1370,7 +1374,7 @@ static void mlx5e_sq_recover(struct work_struct *work) return; mlx5e_reset_txqsq_cc_pc(sq); - sq->stats.recover++; + sq->stats->recover++; recover->last_recover = jiffies; mlx5e_activate_txqsq(sq); } @@ -1665,14 +1669,14 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { - int err; - int tc; + struct mlx5e_priv *priv = c->priv; + int err, tc, max_nch = priv->profile->max_nch(priv->mdev); for (tc = 0; tc < params->num_tc; tc++) { - int txq_ix = c->ix + tc * params->num_channels; + int txq_ix = c->ix + tc * max_nch; err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, - params, &cparam->sq, &c->sq[tc]); + params, &cparam->sq, &c->sq[tc], tc); if (err) goto err_close_sqs; } @@ -1802,6 +1806,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->num_tc = params->num_tc; c->xdp = !!params->xdp_prog; + c->stats = &priv->channel_stats[ix].ch; mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); c->irq_desc = irq_to_desc(irq); @@ -2634,7 +2639,7 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv) struct mlx5e_txqsq *sq; int i, tc; - for (i = 0; i < priv->channels.num; i++) + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) for (tc = 0; tc < priv->profile->max_tc; tc++) priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num; @@ -3139,6 +3144,8 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev, if (err) goto out; + priv->max_opened_tc = max_t(u8, priv->max_opened_tc, + new_channels.params.num_tc); mlx5e_switch_priv_channels(priv, &new_channels, NULL); out: mutex_unlock(&priv->state_lock); @@ -3826,7 +3833,7 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, return false; netdev_err(dev, "Recover %d 
eqes on EQ 0x%x\n", eqe_count, eq->eqn); - sq->channel->stats.eq_rearm++; + sq->channel->stats->eq_rearm++; return true; } @@ -4250,6 +4257,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->profile = profile; priv->ppriv = ppriv; priv->msglevel = MLX5E_MSG_LEVEL; + priv->max_opened_tc = 1; mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev), netdev->mtu); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 1a3f9e091385..de6364125f0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -138,13 +138,13 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; - rq_stats = &c->rq.stats; + rq_stats = c->rq.stats; s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; for (j = 0; j < priv->channels.params.num_tc; j++) { - sq_stats = &c->sq[j].stats; + sq_stats = c->sq[j].stats; s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index ac54380d41e4..bfef73b37fbc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -65,7 +65,7 @@ static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, mlx5e_read_cqe_slot(cq, cqcc, &cq->title); cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt); cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter); - rq->stats.cqe_compress_blks++; + rq->stats->cqe_compress_blks++; } static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) @@ -146,7 +146,7 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc); cq->wq.cc = cqcc; cq->decmprs_left -= cqe_count; - rq->stats.cqe_compress_pkts += cqe_count; + rq->stats->cqe_compress_pkts += cqe_count; return cqe_count; } @@ -176,14 +176,15 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, { struct mlx5e_page_cache *cache = &rq->page_cache; u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1); + struct mlx5e_rq_stats *stats = rq->stats; if (tail_next == cache->head) { - rq->stats.cache_full++; + stats->cache_full++; return false; } if (unlikely(mlx5e_page_is_reserved(dma_info->page))) { - rq->stats.cache_waive++; + stats->cache_waive++; return false; } @@ -196,20 +197,21 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { struct mlx5e_page_cache *cache = &rq->page_cache; + struct mlx5e_rq_stats *stats = rq->stats; if (unlikely(cache->head == cache->tail)) { - rq->stats.cache_empty++; + stats->cache_empty++; return false; } if (page_ref_count(cache->page_cache[cache->head].page) != 1) { - rq->stats.cache_busy++; + stats->cache_busy++; return false; } *dma_info = cache->page_cache[cache->head]; cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1); - rq->stats.cache_reuse++; + stats->cache_reuse++; dma_sync_single_for_device(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq), @@ -294,7 +296,7 @@ static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi) { if (mlx5e_page_reuse(rq, wi)) { - rq->stats.page_reuse++; + rq->stats->page_reuse++; return; } @@ -452,7 +454,7 @@ err_unmap: dma_info--; mlx5e_page_release(rq, dma_info, true); } - rq->stats.buff_alloc_err++; 
+ rq->stats->buff_alloc_err++; return err; } @@ -480,7 +482,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head); if (unlikely(err)) { - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; break; } @@ -652,6 +654,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, struct sk_buff *skb, bool lro) { + struct mlx5e_rq_stats *stats = rq->stats; int network_depth = 0; if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) @@ -659,7 +662,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (lro) { skb->ip_summed = CHECKSUM_UNNECESSARY; - rq->stats.csum_unnecessary++; + stats->csum_unnecessary++; return; } @@ -674,7 +677,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, skb->csum = csum_partial(skb->data + ETH_HLEN, network_depth - ETH_HLEN, skb->csum); - rq->stats.csum_complete++; + stats->csum_complete++; return; } @@ -684,15 +687,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (cqe_is_tunneled(cqe)) { skb->csum_level = 1; skb->encapsulation = 1; - rq->stats.csum_unnecessary_inner++; + stats->csum_unnecessary_inner++; return; } - rq->stats.csum_unnecessary++; + stats->csum_unnecessary++; return; } csum_none: skb->ip_summed = CHECKSUM_NONE; - rq->stats.csum_none++; + stats->csum_none++; } static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, @@ -701,6 +704,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; + struct mlx5e_rq_stats *stats = rq->stats; struct net_device *netdev = rq->netdev; skb->mac_len = ETH_HLEN; @@ -710,9 +714,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, /* Subtract one since we already counted this as one * "regular" packet in mlx5e_complete_rx_cqe() */ - rq->stats.packets += lro_num_seg - 1; - rq->stats.lro_packets++; - rq->stats.lro_bytes += cqe_bcnt; + stats->packets += lro_num_seg - 1; + stats->lro_packets++; + stats->lro_bytes += cqe_bcnt; } if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) @@ -727,7 +731,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (cqe_has_vlan(cqe)) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->vlan_info)); - rq->stats.removed_vlan_packets++; + stats->removed_vlan_packets++; } skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; @@ -741,8 +745,10 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - rq->stats.packets++; - rq->stats.bytes += cqe_bcnt; + struct mlx5e_rq_stats *stats = rq->stats; + + stats->packets++; + stats->bytes += cqe_bcnt; mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); } @@ -774,10 +780,12 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, dma_addr_t dma_addr = di->addr + data_offset; unsigned int dma_len = xdp->data_end - xdp->data; + struct mlx5e_rq_stats *stats = rq->stats; + prefetchw(wqe); if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { - rq->stats.xdp_drop++; + stats->xdp_drop++; return false; } @@ -787,7 +795,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, mlx5e_xmit_xdp_doorbell(sq); sq->db.doorbell = false; } - rq->stats.xdp_tx_full++; + stats->xdp_tx_full++; return false; } @@ -821,7 +829,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, sq->db.doorbell = true; - rq->stats.xdp_tx++; + stats->xdp_tx++; return true; } @@ -868,7 +876,7 @@ static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, case XDP_ABORTED: 
trace_xdp_exception(rq->netdev, prog, act); case XDP_DROP: - rq->stats.xdp_drop++; + rq->stats->xdp_drop++; return true; } } @@ -881,7 +889,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, struct sk_buff *skb = build_skb(va, frag_size); if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; return NULL; } @@ -913,7 +921,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, wi->offset += frag_size; if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { - rq->stats.wqe_err++; + rq->stats->wqe_err++; return NULL; } @@ -1030,7 +1038,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w skb = napi_alloc_skb(rq->cq.napi, ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long))); if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; return NULL; } @@ -1116,12 +1124,12 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi->consumed_strides += cstrides; if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { - rq->stats.wqe_err++; + rq->stats->wqe_err++; goto mpwrq_cqe_out; } if (unlikely(mpwrq_is_filler_cqe(cqe))) { - rq->stats.mpwqe_filler++; + rq->stats->mpwqe_filler++; goto mpwrq_cqe_out; } @@ -1276,6 +1284,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { + struct mlx5e_rq_stats *stats = rq->stats; struct hwtstamp_config *tstamp; struct net_device *netdev; struct mlx5e_priv *priv; @@ -1337,9 +1346,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, skb->dev = netdev; - rq->stats.csum_complete++; - rq->stats.packets++; - rq->stats.bytes += cqe_bcnt; + stats->csum_complete++; + stats->packets++; + stats->bytes += cqe_bcnt; } void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 323f2af4200b..776b4d68e156 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -111,20 +111,19 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) { struct mlx5e_sw_stats temp, *s = &temp; - struct mlx5e_rq_stats *rq_stats; - struct mlx5e_sq_stats *sq_stats; - struct mlx5e_ch_stats *ch_stats; - int i, j; + int i; memset(s, 0, sizeof(*s)); read_lock(&priv->stats_lock); if (!priv->channels_active) goto out; - for (i = 0; i < priv->channels.num; i++) { - struct mlx5e_channel *c = priv->channels.c[i]; - rq_stats = &c->rq.stats; - ch_stats = &c->stats; + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { + struct mlx5e_channel_stats *channel_stats = + &priv->channel_stats[i]; + struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; + struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; + int j; s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; @@ -151,8 +150,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_waive += rq_stats->cache_waive; s->ch_eq_rearm += ch_stats->eq_rearm; - for (j = 0; j < priv->channels.params.num_tc; j++) { - sq_stats = &c->sq[j].stats; + for (j = 0; j < priv->max_opened_tc; j++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; @@ -1160,30 +1159,37 @@ static const struct counter_desc ch_stats_desc[] = { static int 
mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) { - return (NUM_RQ_STATS * priv->channels.num) + - (NUM_CH_STATS * priv->channels.num) + - (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc); + int max_nch = priv->profile->max_nch(priv->mdev); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return 0; + + return (NUM_RQ_STATS * max_nch) + + (NUM_CH_STATS * max_nch) + + (NUM_SQ_STATS * max_nch * priv->max_opened_tc); } static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) { + int max_nch = priv->profile->max_nch(priv->mdev); int i, j, tc; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return idx; - for (i = 0; i < priv->channels.num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, ch_stats_desc[j].format, i); - for (i = 0; i < priv->channels.num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_RQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); - for (tc = 0; tc < priv->channels.params.num_tc; tc++) - for (i = 0; i < priv->channels.num; i++) + /* priv->channel_tc2txq[i][tc] is valid only when device is open */ + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, sq_stats_desc[j].format, @@ -1195,29 +1201,29 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) { - struct mlx5e_channels *channels = &priv->channels; + int max_nch = priv->profile->max_nch(priv->mdev); int i, j, tc; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return idx; - for (i = 0; i < channels->num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->stats, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch, ch_stats_desc, j); - for (i = 0; i < channels->num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_RQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq, rq_stats_desc, j); - for (tc = 0; tc < priv->channels.params.num_tc; tc++) - for (i = 0; i < channels->num; i++) + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc], sq_stats_desc, j); return idx; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index d37566be06e1..aafd75257fd0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -220,28 +220,29 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct if (skb->encapsulation) { eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM | MLX5_ETH_WQE_L4_INNER_CSUM; - sq->stats.csum_partial_inner++; + sq->stats->csum_partial_inner++; } else { eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; - sq->stats.csum_partial++; + sq->stats->csum_partial++; } } else - sq->stats.csum_none++; + sq->stats->csum_none++; } static inline u16 mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) { + struct mlx5e_sq_stats *stats = sq->stats; u16 ihs; if (skb->encapsulation) { ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); - sq->stats.tso_inner_packets++; - 
sq->stats.tso_inner_bytes += skb->len - ihs; + stats->tso_inner_packets++; + stats->tso_inner_bytes += skb->len - ihs; } else { ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); - sq->stats.tso_packets++; - sq->stats.tso_bytes += skb->len - ihs; + stats->tso_packets++; + stats->tso_bytes += skb->len - ihs; } return ihs; @@ -311,7 +312,7 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, wi->num_wqebbs = 1; mlx5e_post_nop(wq, sq->sqn, &sq->pc); } - sq->stats.nop += nnops; + sq->stats->nop += nnops; } static inline void @@ -337,7 +338,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, sq->pc += wi->num_wqebbs; if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) { netif_tx_stop_queue(sq->txq); - sq->stats.stopped++; + sq->stats->stopped++; } if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) @@ -355,6 +356,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_data_seg *dseg; struct mlx5e_tx_wqe_info *wi; + struct mlx5e_sq_stats *stats = sq->stats; unsigned char *skb_data = skb->data; unsigned int skb_len = skb->len; u16 ds_cnt, ds_cnt_inl = 0; @@ -371,17 +373,17 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, mss = cpu_to_be16(skb_shinfo(skb)->gso_size); ihs = mlx5e_tx_get_gso_ihs(sq, skb); num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; - sq->stats.packets += skb_shinfo(skb)->gso_segs; + stats->packets += skb_shinfo(skb)->gso_segs; } else { opcode = MLX5_OPCODE_SEND; mss = 0; ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); - sq->stats.packets++; + stats->packets++; } - sq->stats.bytes += num_bytes; - sq->stats.xmit_more += skb->xmit_more; + stats->bytes += num_bytes; + stats->xmit_more += skb->xmit_more; headlen = skb_len - ihs - skb->data_len; ds_cnt += !!headlen; @@ -415,7 +417,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (skb_vlan_tag_present(skb)) { mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs - VLAN_HLEN, &skb_data, &skb_len); - sq->stats.added_vlan_packets++; + stats->added_vlan_packets++; } else { memcpy(eseg->inline_hdr.start, skb_data, ihs); mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); @@ -427,7 +429,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD)) eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN); eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); - sq->stats.added_vlan_packets++; + stats->added_vlan_packets++; } num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg); @@ -440,7 +442,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, return NETDEV_TX_OK; err_drop: - sq->stats.dropped++; + stats->dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -524,7 +526,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) queue_work(cq->channel->priv->wq, &sq->recover.recover_work); } - sq->stats.cqe_err++; + sq->stats->cqe_err++; } do { @@ -584,7 +586,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) MLX5E_SQ_STOP_ROOM) && !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); - sq->stats.wake++; + sq->stats->wake++; } return (i == MLX5E_TX_CQ_POLL_BUDGET); @@ -641,6 +643,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_data_seg *dseg; struct mlx5e_tx_wqe_info *wi; + struct mlx5e_sq_stats *stats = sq->stats; unsigned 
char *skb_data = skb->data; unsigned int skb_len = skb->len; u16 headlen, ihs, pi, frag_pi; @@ -659,17 +662,17 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, mss = cpu_to_be16(skb_shinfo(skb)->gso_size); ihs = mlx5e_tx_get_gso_ihs(sq, skb); num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; - sq->stats.packets += skb_shinfo(skb)->gso_segs; + stats->packets += skb_shinfo(skb)->gso_segs; } else { opcode = MLX5_OPCODE_SEND; mss = 0; ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); - sq->stats.packets++; + stats->packets++; } - sq->stats.bytes += num_bytes; - sq->stats.xmit_more += skb->xmit_more; + stats->bytes += num_bytes; + stats->xmit_more += skb->xmit_more; headlen = skb_len - ihs - skb->data_len; ds_cnt += !!headlen; @@ -716,7 +719,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, return NETDEV_TX_OK; err_drop: - sq->stats.dropped++; + stats->dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 5d6f9ce2bf80..1b17f682693b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -46,24 +46,26 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) { + struct mlx5e_sq_stats *stats = sq->stats; struct net_dim_sample dim_sample; if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state))) return; - net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes, + net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); net_dim(&sq->dim, dim_sample); } static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) { + struct mlx5e_rq_stats *stats = rq->stats; struct net_dim_sample dim_sample; if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state))) return; - net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes, + net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); net_dim(&rq->dim, dim_sample); } -- cgit v1.2.3 From 1b40428c04b1f89aa71db0310638d26e6eaaac07 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 24 May 2018 04:57:51 -0700 Subject: bnx2x: Collect the device debug information during Tx timeout. Tx-timeout mostly happens due to some issue in the device. In such cases, debug dump would be helpful for identifying the cause of the issue. This patch adds support to spill debug data during the Tx timeout. Here bnx2x_panic_dump() API is used instead of bnx2x_panic(), since we still want to allow the Tx-timeout recovery a chance to succeed. Changes from previous version: ------------------------------- v2: Fixed a coding error. Please consider applying this to "net-next". Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 95871576ab92..8cd73ff5debc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4962,8 +4962,13 @@ void bnx2x_tx_timeout(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); -#ifdef BNX2X_STOP_ON_ERROR + /* We want the information of the dump logged, + * but calling bnx2x_panic() would kill all chances of recovery. + */ if (!bp->panic) +#ifndef BNX2X_STOP_ON_ERROR + bnx2x_panic_dump(bp, false); +#else bnx2x_panic(); #endif -- cgit v1.2.3 From 29555fa3de865630570b5f53c847b953413daf1a Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Thu, 24 May 2018 16:09:07 +0200 Subject: net: stmmac: Use mutex instead of spinlock Some drivers, such as DWC EQOS on Tegra, need to perform operations that can sleep under this lock (clk_set_rate() in tegra_eqos_fix_speed()) for proper operation. Since there is no need for this lock to be a spinlock, convert it to a mutex instead. Fixes: e6ea2d16fc61 ("net: stmmac: dwc-qos: Add Tegra186 support") Reported-by: Jon Hunter Signed-off-by: Thierry Reding Tested-by: Bhadram Varka Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2 +- .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 12 ++++----- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 31 ++++++++++------------ 3 files changed, 21 insertions(+), 24 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 4d425b1a0c59..fbfe5dcefa87 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -122,7 +122,7 @@ struct stmmac_priv { struct net_device *dev; struct device *device; struct mac_device_info *hw; - spinlock_t lock; + struct mutex lock; /* RX Queue */ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 6d82b3ef5c3b..5710864fa809 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -390,9 +390,9 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full); - spin_lock(&priv->lock); + mutex_lock(&priv->lock); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); - spin_unlock(&priv->lock); + mutex_unlock(&priv->lock); return 0; } @@ -632,12 +632,12 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); - spin_lock_irq(&priv->lock); + mutex_lock(&priv->lock); if (device_can_wakeup(priv->device)) { wol->supported = WAKE_MAGIC | WAKE_UCAST; wol->wolopts = priv->wolopts; } - spin_unlock_irq(&priv->lock); + mutex_unlock(&priv->lock); } static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -666,9 +666,9 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) disable_irq_wake(priv->wol_irq); } - spin_lock_irq(&priv->lock); + mutex_lock(&priv->lock); priv->wolopts = wol->wolopts; - spin_unlock_irq(&priv->lock); + mutex_unlock(&priv->lock); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c32de53a00d3..77af85c981db 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -381,7 +381,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv) { struct net_device *ndev = priv->dev; int interface = priv->plat->interface; - unsigned long flags; bool ret = false; if ((interface != PHY_INTERFACE_MODE_MII) && @@ -408,7 +407,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) * changed). * In that case the driver disable own timers. */ - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); if (priv->eee_active) { netdev_dbg(priv->dev, "disable EEE\n"); del_timer_sync(&priv->eee_ctrl_timer); @@ -416,11 +415,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) tx_lpi_timer); } priv->eee_active = 0; - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); goto out; } /* Activate the EEE and start timers */ - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); if (!priv->eee_active) { priv->eee_active = 1; timer_setup(&priv->eee_ctrl_timer, @@ -435,7 +434,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link); ret = true; - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); } @@ -811,13 +810,12 @@ static void stmmac_adjust_link(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; - unsigned long flags; bool new_state = false; if (!phydev) return; - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); if (phydev->link) { u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); @@ -876,7 +874,7 @@ static void stmmac_adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); if (phydev->is_pseudo_fixed_link) /* Stop PHY layer to call the hook to adjust the link in case @@ -4275,7 +4273,7 @@ int stmmac_dvr_probe(struct device *device, (8 * priv->plat->rx_queues_to_use)); } - spin_lock_init(&priv->lock); + mutex_init(&priv->lock); /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be @@ -4359,6 +4357,7 @@ int stmmac_dvr_remove(struct device *dev) priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); destroy_workqueue(priv->wq); + mutex_destroy(&priv->lock); free_netdev(ndev); return 0; @@ -4376,7 +4375,6 @@ int stmmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); - unsigned long flags; if (!ndev || !netif_running(ndev)) return 0; @@ -4384,7 +4382,7 @@ int stmmac_suspend(struct device *dev) if (ndev->phydev) phy_stop(ndev->phydev); - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); netif_device_detach(ndev); stmmac_stop_all_queues(priv); @@ -4405,7 +4403,7 @@ int stmmac_suspend(struct device *dev) clk_disable(priv->plat->pclk); clk_disable(priv->plat->stmmac_clk); } - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); priv->oldlink = false; priv->speed = SPEED_UNKNOWN; @@ -4450,7 +4448,6 @@ int stmmac_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); - unsigned long flags; if (!netif_running(ndev)) return 0; @@ -4462,9 +4459,9 @@ int stmmac_resume(struct device 
*dev) * from another devices (e.g. serial console). */ if (device_may_wakeup(priv->device)) { - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); stmmac_pmt(priv, priv->hw, 0); - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); @@ -4478,7 +4475,7 @@ int stmmac_resume(struct device *dev) netif_device_attach(ndev); - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); @@ -4492,7 +4489,7 @@ int stmmac_resume(struct device *dev) stmmac_start_all_queues(priv); - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); if (ndev->phydev) phy_start(ndev->phydev); -- cgit v1.2.3 From cb1603948a0b80f27d2525bb7ac0d4bbb3849926 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Thu, 24 May 2018 17:49:35 +0200 Subject: vrf: add CRC32c offload to device features MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SCTP sockets originated in a VRF can improve their performance if CRC32c computation is delegated to underlying devices: update device features, setting NETIF_F_SCTP_CRC. Iterating the following command in the topology proposed with [1], # ip vrf exec vrf-h2 netperf -H 192.0.2.1 -t SCTP_STREAM -- -m 10K the measured throughput in Mbit/s improved from 2395 ± 1% to 2720 ± 1%. [1] https://www.spinics.net/lists/netdev/msg486007.html Signed-off-by: Davide Caratti Reviewed-by: Marcelo Ricardo Leitner Acked-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 90b5f3900c22..f93547f257fb 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1254,7 +1254,7 @@ static void vrf_setup(struct net_device *dev) /* enable offload features */ dev->features |= NETIF_F_GSO_SOFTWARE; - dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM; + dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; dev->hw_features = dev->features; -- cgit v1.2.3 From 1ff78076d8dd182c883e6caa285479ae7db32ef8 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Thu, 24 May 2018 09:55:14 -0700 Subject: netvsc: refactor notifier/event handling code to use the failover framework Use the registration/notification framework supported by the generic failover infrastructure. Signed-off-by: Sridhar Samudrala Signed-off-by: David S. Miller --- drivers/net/hyperv/Kconfig | 1 + drivers/net/hyperv/hyperv_net.h | 2 + drivers/net/hyperv/netvsc_drv.c | 222 +++++++++++----------------------------- 3 files changed, 60 insertions(+), 165 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig index 0765d5f61714..23a2d145813a 100644 --- a/drivers/net/hyperv/Kconfig +++ b/drivers/net/hyperv/Kconfig @@ -2,5 +2,6 @@ config HYPERV_NET tristate "Microsoft Hyper-V virtual network driver" depends on HYPERV select UCS2_STRING + select FAILOVER help Select this option to enable the Hyper-V virtual network driver. 
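The registration flow referred to above boils down to filling a failover_ops table and attaching it to the synthetic netdev; here is a minimal sketch with invented names and stubbed handlers (the real ops table and the failover_register()/failover_unregister() calls are in the netvsc_drv.c changes below):

#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/failover.h>

static int ex_register_vf(struct net_device *vf_netdev,
                          struct net_device *ndev)
{
        return 0;       /* enslave the VF to the synthetic device here */
}

static int ex_unregister_vf(struct net_device *vf_netdev,
                            struct net_device *ndev)
{
        return 0;       /* undo the enslavement here */
}

static struct failover_ops ex_failover_ops = {
        .slave_register   = ex_register_vf,
        .slave_unregister = ex_unregister_vf,
};

struct ex_ctx {
        struct failover *failover;
};

/* probe: the framework now does the MAC matching and notifier plumbing */
static int ex_probe(struct ex_ctx *ctx, struct net_device *ndev)
{
        ctx->failover = failover_register(ndev, &ex_failover_ops);
        return PTR_ERR_OR_ZERO(ctx->failover);
}

/* remove */
static void ex_remove(struct ex_ctx *ctx)
{
        failover_unregister(ctx->failover);
}
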
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 1be34d2e3563..99d8e7398a5b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -932,6 +932,8 @@ struct net_device_context { u32 vf_alloc; /* Serial number of the VF to team with */ u32 vf_serial; + + struct failover *failover; }; /* Per channel data */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 60a5769ef5a1..ebe964203eff 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "hyperv_net.h" @@ -1779,46 +1780,6 @@ out_unlock: rtnl_unlock(); } -static struct net_device *get_netvsc_bymac(const u8 *mac) -{ - struct net_device *dev; - - ASSERT_RTNL(); - - for_each_netdev(&init_net, dev) { - if (dev->netdev_ops != &device_ops) - continue; /* not a netvsc device */ - - if (ether_addr_equal(mac, dev->perm_addr)) - return dev; - } - - return NULL; -} - -static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) -{ - struct net_device *dev; - - ASSERT_RTNL(); - - for_each_netdev(&init_net, dev) { - struct net_device_context *net_device_ctx; - - if (dev->netdev_ops != &device_ops) - continue; /* not a netvsc device */ - - net_device_ctx = netdev_priv(dev); - if (!rtnl_dereference(net_device_ctx->nvdev)) - continue; /* device is removed */ - - if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev) - return dev; /* a match */ - } - - return NULL; -} - /* Called when VF is injecting data into network stack. * Change the associated network device from VF to netvsc. * note: already called with rcu_read_lock @@ -1841,46 +1802,6 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) return RX_HANDLER_ANOTHER; } -static int netvsc_vf_join(struct net_device *vf_netdev, - struct net_device *ndev) -{ - struct net_device_context *ndev_ctx = netdev_priv(ndev); - int ret; - - ret = netdev_rx_handler_register(vf_netdev, - netvsc_vf_handle_frame, ndev); - if (ret != 0) { - netdev_err(vf_netdev, - "can not register netvsc VF receive handler (err = %d)\n", - ret); - goto rx_handler_failed; - } - - ret = netdev_master_upper_dev_link(vf_netdev, ndev, - NULL, NULL, NULL); - if (ret != 0) { - netdev_err(vf_netdev, - "can not set master device %s (err = %d)\n", - ndev->name, ret); - goto upper_link_failed; - } - - /* set slave flag before open to prevent IPv6 addrconf */ - vf_netdev->flags |= IFF_SLAVE; - - schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); - - call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); - - netdev_info(vf_netdev, "joined to %s\n", ndev->name); - return 0; - -upper_link_failed: - netdev_rx_handler_unregister(vf_netdev); -rx_handler_failed: - return ret; -} - static void __netvsc_vf_setup(struct net_device *ndev, struct net_device *vf_netdev) { @@ -1931,85 +1852,95 @@ static void netvsc_vf_setup(struct work_struct *w) rtnl_unlock(); } -static int netvsc_register_vf(struct net_device *vf_netdev) +static int netvsc_pre_register_vf(struct net_device *vf_netdev, + struct net_device *ndev) { - struct net_device *ndev; struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; - if (vf_netdev->addr_len != ETH_ALEN) - return NOTIFY_DONE; - - /* - * We will use the MAC address to locate the synthetic interface to - * associate with the VF interface. If we don't find a matching - * synthetic interface, move on. 
- */ - ndev = get_netvsc_bymac(vf_netdev->perm_addr); - if (!ndev) - return NOTIFY_DONE; - net_device_ctx = netdev_priv(ndev); netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) - return NOTIFY_DONE; + return -ENODEV; + + return 0; +} + +static int netvsc_register_vf(struct net_device *vf_netdev, + struct net_device *ndev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); - if (netvsc_vf_join(vf_netdev, ndev) != 0) - return NOTIFY_DONE; + /* set slave flag before open to prevent IPv6 addrconf */ + vf_netdev->flags |= IFF_SLAVE; - netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); + schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); + + call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); + + netdev_info(vf_netdev, "joined to %s\n", ndev->name); dev_hold(vf_netdev); - rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); - return NOTIFY_OK; + rcu_assign_pointer(ndev_ctx->vf_netdev, vf_netdev); + + return 0; } /* VF up/down change detected, schedule to change data path */ -static int netvsc_vf_changed(struct net_device *vf_netdev) +static int netvsc_vf_changed(struct net_device *vf_netdev, + struct net_device *ndev) { struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; - struct net_device *ndev; bool vf_is_up = netif_running(vf_netdev); - ndev = get_netvsc_byref(vf_netdev); - if (!ndev) - return NOTIFY_DONE; - net_device_ctx = netdev_priv(ndev); netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); if (!netvsc_dev) - return NOTIFY_DONE; + return -ENODEV; netvsc_switch_datapath(ndev, vf_is_up); netdev_info(ndev, "Data path switched %s VF: %s\n", vf_is_up ? "to" : "from", vf_netdev->name); - return NOTIFY_OK; + return 0; } -static int netvsc_unregister_vf(struct net_device *vf_netdev) +static int netvsc_pre_unregister_vf(struct net_device *vf_netdev, + struct net_device *ndev) { - struct net_device *ndev; struct net_device_context *net_device_ctx; - ndev = get_netvsc_byref(vf_netdev); - if (!ndev) - return NOTIFY_DONE; - net_device_ctx = netdev_priv(ndev); cancel_delayed_work_sync(&net_device_ctx->vf_takeover); + return 0; +} + +static int netvsc_unregister_vf(struct net_device *vf_netdev, + struct net_device *ndev) +{ + struct net_device_context *net_device_ctx; + + net_device_ctx = netdev_priv(ndev); + netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); - netdev_rx_handler_unregister(vf_netdev); - netdev_upper_dev_unlink(vf_netdev, ndev); RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); dev_put(vf_netdev); - return NOTIFY_OK; + return 0; } +static struct failover_ops netvsc_failover_ops = { + .slave_pre_register = netvsc_pre_register_vf, + .slave_register = netvsc_register_vf, + .slave_pre_unregister = netvsc_pre_unregister_vf, + .slave_unregister = netvsc_unregister_vf, + .slave_link_change = netvsc_vf_changed, + .slave_handle_frame = netvsc_vf_handle_frame, +}; + static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { @@ -2099,8 +2030,14 @@ static int netvsc_probe(struct hv_device *dev, goto register_failed; } + net_device_ctx->failover = failover_register(net, &netvsc_failover_ops); + if (IS_ERR(net_device_ctx->failover)) + goto err_failover; + return ret; +err_failover: + unregister_netdev(net); register_failed: rndis_filter_device_remove(dev, nvdev); rndis_failed: @@ -2141,13 +2078,15 @@ static int netvsc_remove(struct hv_device *dev) rtnl_lock(); vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); if (vf_netdev) - 
netvsc_unregister_vf(vf_netdev); + failover_slave_unregister(vf_netdev); if (nvdev) rndis_filter_device_remove(dev, nvdev); unregister_netdevice(net); + failover_unregister(ndev_ctx->failover); + rtnl_unlock(); rcu_read_unlock(); @@ -2174,54 +2113,8 @@ static struct hv_driver netvsc_drv = { .remove = netvsc_remove, }; -/* - * On Hyper-V, every VF interface is matched with a corresponding - * synthetic interface. The synthetic interface is presented first - * to the guest. When the corresponding VF instance is registered, - * we will take care of switching the data path. - */ -static int netvsc_netdev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); - - /* Skip our own events */ - if (event_dev->netdev_ops == &device_ops) - return NOTIFY_DONE; - - /* Avoid non-Ethernet type devices */ - if (event_dev->type != ARPHRD_ETHER) - return NOTIFY_DONE; - - /* Avoid Vlan dev with same MAC registering as VF */ - if (is_vlan_dev(event_dev)) - return NOTIFY_DONE; - - /* Avoid Bonding master dev with same MAC registering as VF */ - if ((event_dev->priv_flags & IFF_BONDING) && - (event_dev->flags & IFF_MASTER)) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_REGISTER: - return netvsc_register_vf(event_dev); - case NETDEV_UNREGISTER: - return netvsc_unregister_vf(event_dev); - case NETDEV_UP: - case NETDEV_DOWN: - return netvsc_vf_changed(event_dev); - default: - return NOTIFY_DONE; - } -} - -static struct notifier_block netvsc_netdev_notifier = { - .notifier_call = netvsc_netdev_event, -}; - static void __exit netvsc_drv_exit(void) { - unregister_netdevice_notifier(&netvsc_netdev_notifier); vmbus_driver_unregister(&netvsc_drv); } @@ -2241,7 +2134,6 @@ static int __init netvsc_drv_init(void) if (ret) return ret; - register_netdevice_notifier(&netvsc_netdev_notifier); return 0; } -- cgit v1.2.3 From cfc80d9a11635404a40199a1c9471c96890f3f74 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Thu, 24 May 2018 09:55:15 -0700 Subject: net: Introduce net_failover driver The net_failover driver provides an automated failover mechanism via APIs to create and destroy a failover master netdev and manages a primary and standby slave netdevs that get registered via the generic failover infrastructure. The failover netdev acts a master device and controls 2 slave devices. The original paravirtual interface gets registered as 'standby' slave netdev and a passthru/vf device with the same MAC gets registered as 'primary' slave netdev. Both 'standby' and 'failover' netdevs are associated with the same 'pci' device. The user accesses the network interface via 'failover' netdev. The 'failover' netdev chooses 'primary' netdev as default for transmits when it is available with link up and running. This can be used by paravirtual drivers to enable an alternate low latency datapath. It also enables hypervisor controlled live migration of a VM with direct attached VF by failing over to the paravirtual datapath when the VF is unplugged. Signed-off-by: Sridhar Samudrala Signed-off-by: David S. 
Miller --- Documentation/networking/net_failover.rst | 26 + MAINTAINERS | 8 + drivers/net/Kconfig | 12 + drivers/net/Makefile | 1 + drivers/net/net_failover.c | 836 ++++++++++++++++++++++++++++++ include/net/net_failover.h | 40 ++ 6 files changed, 923 insertions(+) create mode 100644 Documentation/networking/net_failover.rst create mode 100644 drivers/net/net_failover.c create mode 100644 include/net/net_failover.h (limited to 'drivers/net') diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst new file mode 100644 index 000000000000..d4513ad31809 --- /dev/null +++ b/Documentation/networking/net_failover.rst @@ -0,0 +1,26 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============ +NET_FAILOVER +============ + +Overview +======== + +The net_failover driver provides an automated failover mechanism via APIs +to create and destroy a failover master netdev and mananges a primary and +standby slave netdevs that get registered via the generic failover +infrastructrure. + +The failover netdev acts a master device and controls 2 slave devices. The +original paravirtual interface is registered as 'standby' slave netdev and +a passthru/vf device with the same MAC gets registered as 'primary' slave +netdev. Both 'standby' and 'failover' netdevs are associated with the same +'pci' device. The user accesses the network interface via 'failover' netdev. +The 'failover' netdev chooses 'primary' netdev as default for transmits when +it is available with link up and running. + +This can be used by paravirtual drivers to enable an alternate low latency +datapath. It also enables hypervisor controlled live migration of a VM with +direct attached VF by failing over to the paravirtual datapath when the VF +is unplugged. diff --git a/MAINTAINERS b/MAINTAINERS index 6c59bdf49a8a..1831ff5863a1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9654,6 +9654,14 @@ S: Maintained F: Documentation/hwmon/nct6775 F: drivers/hwmon/nct6775.c +NET_FAILOVER MODULE +M: Sridhar Samudrala +L: netdev@vger.kernel.org +S: Supported +F: driver/net/net_failover.c +F: include/net/net_failover.h +F: Documentation/networking/net_failover.rst + NETEFFECT IWARP RNIC DRIVER (IW_NES) M: Faisal Latif L: linux-rdma@vger.kernel.org diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index a029b27fd002..2cdaff90a9ec 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -510,4 +510,16 @@ config NETDEVSIM To compile this driver as a module, choose M here: the module will be called netdevsim. +config NET_FAILOVER + tristate "Failover driver" + select FAILOVER + help + This provides an automated failover mechanism via APIs to create + and destroy a failover master netdev and manages a primary and + standby slave netdevs that get registered via the generic failover + infrastructure. This can be used by paravirtual drivers to enable + an alternate low latency datapath. It alsoenables live migration of + a VM with direct attached VF by failing over to the paravirtual + datapath when the VF is unplugged. 
+ endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 91e67e375dd4..21cde7e78621 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -78,3 +78,4 @@ obj-$(CONFIG_FUJITSU_ES) += fjes/ thunderbolt-net-y += thunderbolt.o obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o obj-$(CONFIG_NETDEVSIM) += netdevsim/ +obj-$(CONFIG_NET_FAILOVER) += net_failover.o diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c new file mode 100644 index 000000000000..8b508e2cf29b --- /dev/null +++ b/drivers/net/net_failover.c @@ -0,0 +1,836 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +/* This provides a net_failover interface for paravirtual drivers to + * provide an alternate datapath by exporting APIs to create and + * destroy a upper 'net_failover' netdev. The upper dev manages the + * original paravirtual interface as a 'standby' netdev and uses the + * generic failover infrastructure to register and manage a direct + * attached VF as a 'primary' netdev. This enables live migration of + * a VM with direct attached VF by failing over to the paravirtual + * datapath when the VF is unplugged. + * + * Some of the netdev management routines are based on bond/team driver as + * this driver provides active-backup functionality similar to those drivers. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static bool net_failover_xmit_ready(struct net_device *dev) +{ + return netif_running(dev) && netif_carrier_ok(dev); +} + +static int net_failover_open(struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + int err; + + primary_dev = rtnl_dereference(nfo_info->primary_dev); + if (primary_dev) { + err = dev_open(primary_dev); + if (err) + goto err_primary_open; + } + + standby_dev = rtnl_dereference(nfo_info->standby_dev); + if (standby_dev) { + err = dev_open(standby_dev); + if (err) + goto err_standby_open; + } + + if ((primary_dev && net_failover_xmit_ready(primary_dev)) || + (standby_dev && net_failover_xmit_ready(standby_dev))) { + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } + + return 0; + +err_standby_open: + dev_close(primary_dev); +err_primary_open: + netif_tx_disable(dev); + return err; +} + +static int net_failover_close(struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + netif_tx_disable(dev); + + slave_dev = rtnl_dereference(nfo_info->primary_dev); + if (slave_dev) + dev_close(slave_dev); + + slave_dev = rtnl_dereference(nfo_info->standby_dev); + if (slave_dev) + dev_close(slave_dev); + + return 0; +} + +static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + atomic_long_inc(&dev->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *xmit_dev; + + /* Try xmit via primary netdev followed by standby netdev */ + xmit_dev = rcu_dereference_bh(nfo_info->primary_dev); + if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) { + xmit_dev = rcu_dereference_bh(nfo_info->standby_dev); + if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) + return net_failover_drop_xmit(skb, dev); + } + + skb->dev = xmit_dev; + skb->queue_mapping = 
qdisc_skb_cb(skb)->slave_dev_queue_mapping; + + return dev_queue_xmit(skb); +} + +static u16 net_failover_select_queue(struct net_device *dev, + struct sk_buff *skb, void *accel_priv, + select_queue_fallback_t fallback) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev; + u16 txq; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + const struct net_device_ops *ops = primary_dev->netdev_ops; + + if (ops->ndo_select_queue) + txq = ops->ndo_select_queue(primary_dev, skb, + accel_priv, fallback); + else + txq = fallback(primary_dev, skb); + + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + + return txq; + } + + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; + + /* Save the original txq to restore before passing to the driver */ + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + + if (unlikely(txq >= dev->real_num_tx_queues)) { + do { + txq -= dev->real_num_tx_queues; + } while (txq >= dev->real_num_tx_queues); + } + + return txq; +} + +/* fold stats, assuming all rtnl_link_stats64 fields are u64, but + * that some drivers can provide 32bit values only. + */ +static void net_failover_fold_stats(struct rtnl_link_stats64 *_res, + const struct rtnl_link_stats64 *_new, + const struct rtnl_link_stats64 *_old) +{ + const u64 *new = (const u64 *)_new; + const u64 *old = (const u64 *)_old; + u64 *res = (u64 *)_res; + int i; + + for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { + u64 nv = new[i]; + u64 ov = old[i]; + s64 delta = nv - ov; + + /* detects if this particular field is 32bit only */ + if (((nv | ov) >> 32) == 0) + delta = (s64)(s32)((u32)nv - (u32)ov); + + /* filter anomalies, some drivers reset their stats + * at down/up events. 
+ */ + if (delta > 0) + res[i] += delta; + } +} + +static void net_failover_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + const struct rtnl_link_stats64 *new; + struct rtnl_link_stats64 temp; + struct net_device *slave_dev; + + spin_lock(&nfo_info->stats_lock); + memcpy(stats, &nfo_info->failover_stats, sizeof(*stats)); + + rcu_read_lock(); + + slave_dev = rcu_dereference(nfo_info->primary_dev); + if (slave_dev) { + new = dev_get_stats(slave_dev, &temp); + net_failover_fold_stats(stats, new, &nfo_info->primary_stats); + memcpy(&nfo_info->primary_stats, new, sizeof(*new)); + } + + slave_dev = rcu_dereference(nfo_info->standby_dev); + if (slave_dev) { + new = dev_get_stats(slave_dev, &temp); + net_failover_fold_stats(stats, new, &nfo_info->standby_stats); + memcpy(&nfo_info->standby_stats, new, sizeof(*new)); + } + + rcu_read_unlock(); + + memcpy(&nfo_info->failover_stats, stats, sizeof(*stats)); + spin_unlock(&nfo_info->stats_lock); +} + +static int net_failover_change_mtu(struct net_device *dev, int new_mtu) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + int ret = 0; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + ret = dev_set_mtu(primary_dev, new_mtu); + if (ret) + return ret; + } + + standby_dev = rcu_dereference(nfo_info->standby_dev); + if (standby_dev) { + ret = dev_set_mtu(standby_dev, new_mtu); + if (ret) { + if (primary_dev) + dev_set_mtu(primary_dev, dev->mtu); + return ret; + } + } + + dev->mtu = new_mtu; + + return 0; +} + +static void net_failover_set_rx_mode(struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + rcu_read_lock(); + + slave_dev = rcu_dereference(nfo_info->primary_dev); + if (slave_dev) { + dev_uc_sync_multiple(slave_dev, dev); + dev_mc_sync_multiple(slave_dev, dev); + } + + slave_dev = rcu_dereference(nfo_info->standby_dev); + if (slave_dev) { + dev_uc_sync_multiple(slave_dev, dev); + dev_mc_sync_multiple(slave_dev, dev); + } + + rcu_read_unlock(); +} + +static int net_failover_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + int ret = 0; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + ret = vlan_vid_add(primary_dev, proto, vid); + if (ret) + return ret; + } + + standby_dev = rcu_dereference(nfo_info->standby_dev); + if (standby_dev) { + ret = vlan_vid_add(standby_dev, proto, vid); + if (ret) + if (primary_dev) + vlan_vid_del(primary_dev, proto, vid); + } + + return ret; +} + +static int net_failover_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + slave_dev = rcu_dereference(nfo_info->primary_dev); + if (slave_dev) + vlan_vid_del(slave_dev, proto, vid); + + slave_dev = rcu_dereference(nfo_info->standby_dev); + if (slave_dev) + vlan_vid_del(slave_dev, proto, vid); + + return 0; +} + +static const struct net_device_ops failover_dev_ops = { + .ndo_open = net_failover_open, + .ndo_stop = net_failover_close, + .ndo_start_xmit = net_failover_start_xmit, + .ndo_select_queue = net_failover_select_queue, + .ndo_get_stats64 = net_failover_get_stats, + .ndo_change_mtu = net_failover_change_mtu, + .ndo_set_rx_mode = net_failover_set_rx_mode, + .ndo_vlan_rx_add_vid 
= net_failover_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = net_failover_vlan_rx_kill_vid, + .ndo_validate_addr = eth_validate_addr, + .ndo_features_check = passthru_features_check, +}; + +#define FAILOVER_NAME "net_failover" +#define FAILOVER_VERSION "0.1" + +static void nfo_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version)); +} + +static int nfo_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + slave_dev = rtnl_dereference(nfo_info->primary_dev); + if (!slave_dev || !net_failover_xmit_ready(slave_dev)) { + slave_dev = rtnl_dereference(nfo_info->standby_dev); + if (!slave_dev || !net_failover_xmit_ready(slave_dev)) { + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.port = PORT_OTHER; + cmd->base.speed = SPEED_UNKNOWN; + + return 0; + } + } + + return __ethtool_get_link_ksettings(slave_dev, cmd); +} + +static const struct ethtool_ops failover_ethtool_ops = { + .get_drvinfo = nfo_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = nfo_ethtool_get_link_ksettings, +}; + +/* Called when slave dev is injecting data into network stack. + * Change the associated network device from lower dev to failover dev. + * note: already called with rcu_read_lock + */ +static rx_handler_result_t net_failover_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct net_device *dev = rcu_dereference(skb->dev->rx_handler_data); + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + standby_dev = rcu_dereference(nfo_info->standby_dev); + + if (primary_dev && skb->dev == standby_dev) + return RX_HANDLER_EXACT; + + skb->dev = dev; + + return RX_HANDLER_ANOTHER; +} + +static void net_failover_compute_features(struct net_device *dev) +{ + u32 vlan_features = FAILOVER_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; + netdev_features_t enc_features = FAILOVER_ENC_FEATURES; + unsigned short max_hard_header_len = ETH_HLEN; + unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM; + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + vlan_features = + netdev_increment_features(vlan_features, + primary_dev->vlan_features, + FAILOVER_VLAN_FEATURES); + enc_features = + netdev_increment_features(enc_features, + primary_dev->hw_enc_features, + FAILOVER_ENC_FEATURES); + + dst_release_flag &= primary_dev->priv_flags; + if (primary_dev->hard_header_len > max_hard_header_len) + max_hard_header_len = primary_dev->hard_header_len; + } + + standby_dev = rcu_dereference(nfo_info->standby_dev); + if (standby_dev) { + vlan_features = + netdev_increment_features(vlan_features, + standby_dev->vlan_features, + FAILOVER_VLAN_FEATURES); + enc_features = + netdev_increment_features(enc_features, + standby_dev->hw_enc_features, + FAILOVER_ENC_FEATURES); + + dst_release_flag &= standby_dev->priv_flags; + if (standby_dev->hard_header_len > max_hard_header_len) + max_hard_header_len = standby_dev->hard_header_len; + } + + dev->vlan_features = vlan_features; + dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; + dev->hard_header_len = 
max_hard_header_len; + + dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + if (dst_release_flag == (IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM)) + dev->priv_flags |= IFF_XMIT_DST_RELEASE; + + netdev_change_features(dev); +} + +static void net_failover_lower_state_changed(struct net_device *slave_dev, + struct net_device *primary_dev, + struct net_device *standby_dev) +{ + struct netdev_lag_lower_state_info info; + + if (netif_carrier_ok(slave_dev)) + info.link_up = true; + else + info.link_up = false; + + if (slave_dev == primary_dev) { + if (netif_running(primary_dev)) + info.tx_enabled = true; + else + info.tx_enabled = false; + } else { + if ((primary_dev && netif_running(primary_dev)) || + (!netif_running(standby_dev))) + info.tx_enabled = false; + else + info.tx_enabled = true; + } + + netdev_lower_state_changed(slave_dev, &info); +} + +static int net_failover_slave_pre_register(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + bool slave_is_standby; + + nfo_info = netdev_priv(failover_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; + if (slave_is_standby ? standby_dev : primary_dev) { + netdev_err(failover_dev, "%s attempting to register as slave dev when %s already present\n", + slave_dev->name, + slave_is_standby ? "standby" : "primary"); + return -EINVAL; + } + + /* We want to allow only a direct attached VF device as a primary + * netdev. As there is no easy way to check for a VF device, restrict + * this to a pci device. + */ + if (!slave_is_standby && (!slave_dev->dev.parent || + !dev_is_pci(slave_dev->dev.parent))) + return -EINVAL; + + if (failover_dev->features & NETIF_F_VLAN_CHALLENGED && + vlan_uses_dev(failover_dev)) { + netdev_err(failover_dev, "Device %s is VLAN challenged and failover device has VLAN set up\n", + failover_dev->name); + return -EINVAL; + } + + return 0; +} + +static int net_failover_slave_register(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + bool slave_is_standby; + u32 orig_mtu; + int err; + + /* Align MTU of slave with failover dev */ + orig_mtu = slave_dev->mtu; + err = dev_set_mtu(slave_dev, failover_dev->mtu); + if (err) { + netdev_err(failover_dev, "unable to change mtu of %s to %u register failed\n", + slave_dev->name, failover_dev->mtu); + goto done; + } + + dev_hold(slave_dev); + + if (netif_running(failover_dev)) { + err = dev_open(slave_dev); + if (err && (err != -EBUSY)) { + netdev_err(failover_dev, "Opening slave %s failed err:%d\n", + slave_dev->name, err); + goto err_dev_open; + } + } + + netif_addr_lock_bh(failover_dev); + dev_uc_sync_multiple(slave_dev, failover_dev); + dev_uc_sync_multiple(slave_dev, failover_dev); + netif_addr_unlock_bh(failover_dev); + + err = vlan_vids_add_by_dev(slave_dev, failover_dev); + if (err) { + netdev_err(failover_dev, "Failed to add vlan ids to device %s err:%d\n", + slave_dev->name, err); + goto err_vlan_add; + } + + nfo_info = netdev_priv(failover_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; + + if (slave_is_standby) { + rcu_assign_pointer(nfo_info->standby_dev, slave_dev); + standby_dev = slave_dev; + 
dev_get_stats(standby_dev, &nfo_info->standby_stats); + } else { + rcu_assign_pointer(nfo_info->primary_dev, slave_dev); + primary_dev = slave_dev; + dev_get_stats(primary_dev, &nfo_info->primary_stats); + failover_dev->min_mtu = slave_dev->min_mtu; + failover_dev->max_mtu = slave_dev->max_mtu; + } + + net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev); + net_failover_compute_features(failover_dev); + + call_netdevice_notifiers(NETDEV_JOIN, slave_dev); + + netdev_info(failover_dev, "failover %s slave:%s registered\n", + slave_is_standby ? "standby" : "primary", slave_dev->name); + + return 0; + +err_vlan_add: + dev_uc_unsync(slave_dev, failover_dev); + dev_mc_unsync(slave_dev, failover_dev); + dev_close(slave_dev); +err_dev_open: + dev_put(slave_dev); + dev_set_mtu(slave_dev, orig_mtu); +done: + return err; +} + +static int net_failover_slave_pre_unregister(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + + nfo_info = netdev_priv(failover_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + if (slave_dev != primary_dev && slave_dev != standby_dev) + return -ENODEV; + + return 0; +} + +static int net_failover_slave_unregister(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + bool slave_is_standby; + + nfo_info = netdev_priv(failover_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + vlan_vids_del_by_dev(slave_dev, failover_dev); + dev_uc_unsync(slave_dev, failover_dev); + dev_mc_unsync(slave_dev, failover_dev); + dev_close(slave_dev); + + nfo_info = netdev_priv(failover_dev); + dev_get_stats(failover_dev, &nfo_info->failover_stats); + + slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; + if (slave_is_standby) { + RCU_INIT_POINTER(nfo_info->standby_dev, NULL); + } else { + RCU_INIT_POINTER(nfo_info->primary_dev, NULL); + if (standby_dev) { + failover_dev->min_mtu = standby_dev->min_mtu; + failover_dev->max_mtu = standby_dev->max_mtu; + } + } + + dev_put(slave_dev); + + net_failover_compute_features(failover_dev); + + netdev_info(failover_dev, "failover %s slave:%s unregistered\n", + slave_is_standby ? 
"standby" : "primary", slave_dev->name); + + return 0; +} + +static int net_failover_slave_link_change(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *primary_dev, *standby_dev; + struct net_failover_info *nfo_info; + + nfo_info = netdev_priv(failover_dev); + + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + if (slave_dev != primary_dev && slave_dev != standby_dev) + return -ENODEV; + + if ((primary_dev && net_failover_xmit_ready(primary_dev)) || + (standby_dev && net_failover_xmit_ready(standby_dev))) { + netif_carrier_on(failover_dev); + netif_tx_wake_all_queues(failover_dev); + } else { + dev_get_stats(failover_dev, &nfo_info->failover_stats); + netif_carrier_off(failover_dev); + netif_tx_stop_all_queues(failover_dev); + } + + net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev); + + return 0; +} + +static int net_failover_slave_name_change(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *primary_dev, *standby_dev; + struct net_failover_info *nfo_info; + + nfo_info = netdev_priv(failover_dev); + + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + if (slave_dev != primary_dev && slave_dev != standby_dev) + return -ENODEV; + + /* We need to bring up the slave after the rename by udev in case + * open failed with EBUSY when it was registered. + */ + dev_open(slave_dev); + + return 0; +} + +static struct failover_ops net_failover_ops = { + .slave_pre_register = net_failover_slave_pre_register, + .slave_register = net_failover_slave_register, + .slave_pre_unregister = net_failover_slave_pre_unregister, + .slave_unregister = net_failover_slave_unregister, + .slave_link_change = net_failover_slave_link_change, + .slave_name_change = net_failover_slave_name_change, + .slave_handle_frame = net_failover_handle_frame, +}; + +/** + * net_failover_create - Create and register a failover instance + * + * @dev: standby netdev + * + * Creates a failover netdev and registers a failover instance for a standby + * netdev. Used by paravirtual drivers that use 3-netdev model. + * The failover netdev acts as a master device and controls 2 slave devices - + * the original standby netdev and a VF netdev with the same MAC gets + * registered as primary netdev. + * + * Return: pointer to failover instance + */ +struct failover *net_failover_create(struct net_device *standby_dev) +{ + struct device *dev = standby_dev->dev.parent; + struct net_device *failover_dev; + struct failover *failover; + int err; + + /* Alloc at least 2 queues, for now we are going with 16 assuming + * that VF devices being enslaved won't have too many queues. + */ + failover_dev = alloc_etherdev_mq(sizeof(struct net_failover_info), 16); + if (!failover_dev) { + dev_err(dev, "Unable to allocate failover_netdev!\n"); + return ERR_PTR(-ENOMEM); + } + + dev_net_set(failover_dev, dev_net(standby_dev)); + SET_NETDEV_DEV(failover_dev, dev); + + failover_dev->netdev_ops = &failover_dev_ops; + failover_dev->ethtool_ops = &failover_ethtool_ops; + + /* Initialize the device options */ + failover_dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; + failover_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | + IFF_TX_SKB_SHARING); + + /* don't acquire failover netdev's netif_tx_lock when transmitting */ + failover_dev->features |= NETIF_F_LLTX; + + /* Don't allow failover devices to change network namespaces. 
*/ + failover_dev->features |= NETIF_F_NETNS_LOCAL; + + failover_dev->hw_features = FAILOVER_VLAN_FEATURES | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + failover_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; + failover_dev->features |= failover_dev->hw_features; + + memcpy(failover_dev->dev_addr, standby_dev->dev_addr, + failover_dev->addr_len); + + failover_dev->min_mtu = standby_dev->min_mtu; + failover_dev->max_mtu = standby_dev->max_mtu; + + err = register_netdev(failover_dev); + if (err) { + dev_err(dev, "Unable to register failover_dev!\n"); + goto err_register_netdev; + } + + netif_carrier_off(failover_dev); + + failover = failover_register(failover_dev, &net_failover_ops); + if (IS_ERR(failover)) + goto err_failover_register; + + return failover; + +err_failover_register: + unregister_netdev(failover_dev); +err_register_netdev: + free_netdev(failover_dev); + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(net_failover_create); + +/** + * net_failover_destroy - Destroy a failover instance + * + * @failover: pointer to failover instance + * + * Unregisters any slave netdevs associated with the failover instance by + * calling failover_slave_unregister(). + * unregisters the failover instance itself and finally frees the failover + * netdev. Used by paravirtual drivers that use 3-netdev model. + * + */ +void net_failover_destroy(struct failover *failover) +{ + struct net_failover_info *nfo_info; + struct net_device *failover_dev; + struct net_device *slave_dev; + + if (!failover) + return; + + failover_dev = rcu_dereference(failover->failover_dev); + nfo_info = netdev_priv(failover_dev); + + netif_device_detach(failover_dev); + + rtnl_lock(); + + slave_dev = rtnl_dereference(nfo_info->primary_dev); + if (slave_dev) + failover_slave_unregister(slave_dev); + + slave_dev = rtnl_dereference(nfo_info->standby_dev); + if (slave_dev) + failover_slave_unregister(slave_dev); + + failover_unregister(failover); + + unregister_netdevice(failover_dev); + + rtnl_unlock(); + + free_netdev(failover_dev); +} +EXPORT_SYMBOL_GPL(net_failover_destroy); + +static __init int +net_failover_init(void) +{ + return 0; +} +module_init(net_failover_init); + +static __exit +void net_failover_exit(void) +{ +} +module_exit(net_failover_exit); + +MODULE_DESCRIPTION("Failover driver for Paravirtual drivers"); +MODULE_LICENSE("GPL v2"); diff --git a/include/net/net_failover.h b/include/net/net_failover.h new file mode 100644 index 000000000000..b12a1c469d1c --- /dev/null +++ b/include/net/net_failover.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. 
*/ + +#ifndef _NET_FAILOVER_H +#define _NET_FAILOVER_H + +#include + +/* failover state */ +struct net_failover_info { + /* primary netdev with same MAC */ + struct net_device __rcu *primary_dev; + + /* standby netdev */ + struct net_device __rcu *standby_dev; + + /* primary netdev stats */ + struct rtnl_link_stats64 primary_stats; + + /* standby netdev stats */ + struct rtnl_link_stats64 standby_stats; + + /* aggregated stats */ + struct rtnl_link_stats64 failover_stats; + + /* spinlock while updating stats */ + spinlock_t stats_lock; +}; + +struct failover *net_failover_create(struct net_device *standby_dev); +void net_failover_destroy(struct failover *failover); + +#define FAILOVER_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ + NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ + NETIF_F_HIGHDMA | NETIF_F_LRO) + +#define FAILOVER_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ + NETIF_F_RXCSUM | NETIF_F_ALL_TSO) + +#endif /* _NET_FAILOVER_H */ -- cgit v1.2.3 From 9805069d14c1b0b66b1600ea60cfc08f94841bd8 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Thu, 24 May 2018 09:55:16 -0700 Subject: virtio_net: Introduce VIRTIO_NET_F_STANDBY feature bit This feature bit can be used by hypervisor to indicate virtio_net device to act as a standby for another device with the same MAC address. VIRTIO_NET_F_STANDBY is defined as bit 62 as it is a device feature bit. Signed-off-by: Sridhar Samudrala Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 2 +- include/uapi/linux/virtio_net.h | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b2647dd5d302..fe4534ee50a1 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3030,7 +3030,7 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ VIRTIO_NET_F_CTRL_MAC_ADDR, \ VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ - VIRTIO_NET_F_SPEED_DUPLEX + VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY static unsigned int features[] = { VIRTNET_FEATURES, diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 5de6ed37695b..a3715a3224c1 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -57,6 +57,9 @@ * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ +#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device + * with the same MAC. + */ #define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */ #ifndef VIRTIO_NET_NO_LEGACY -- cgit v1.2.3 From ba5e4426e80e0435358c7117c339e6a4c22c34ad Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Thu, 24 May 2018 09:55:17 -0700 Subject: virtio_net: Extend virtio to use VF datapath when available This patch enables virtio_net to switch over to a VF datapath when STANDBY feature is enabled and a VF netdev is present with the same MAC address. It allows live migration of a VM with a direct attached VF without the need to setup a bond/team between a VF and virtio net device in the guest. It uses the API that is exported by the net_failover driver to create and and destroy a master failover netdev. When STANDBY feature is enabled, an additional netdev(failover netdev) is created that acts as a master device and tracks the state of the 2 lower netdevs. The original virtio_net netdev is marked as 'standby' netdev and a passthru device with the same MAC is registered as 'primary' netdev. 
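[Editorial aside, not part of the commit: condensed from the virtio_net hunks further down, the probe/remove call pattern described above is roughly the sketch below. Identifiers follow those hunks, the surrounding error unwinding is elided, and this is an illustration rather than a substitute for the actual diff.]

	/* in virtnet_probe(), once VIRTIO_NET_F_STANDBY has been negotiated,
	 * create the failover master netdev that will track the standby and
	 * primary slaves
	 */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover))
			goto free_vqs;
	}

	/* in virtnet_remove() and on the probe error path: tear it down;
	 * net_failover_destroy() returns early on a NULL pointer
	 */
	net_failover_destroy(vi->failover);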
The hypervisor needs to unplug the VF device from the guest on the source host and reset the MAC filter of the VF to initiate failover of datapath to virtio before starting the migration. After the migration is completed, the destination hypervisor sets the MAC filter on the VF and plugs it back to the guest to switch over to VF datapath. This patch is based on the discussion initiated by Jesse on this thread. https://marc.info/?l=linux-virtualization&m=151189725224231&w=2 Signed-off-by: Sridhar Samudrala Signed-off-by: David S. Miller --- Documentation/networking/net_failover.rst | 90 +++++++++++++++++++++++++++++++ drivers/net/Kconfig | 1 + drivers/net/virtio_net.c | 38 ++++++++++++- 3 files changed, 128 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst index d4513ad31809..70ca2f5800c4 100644 --- a/Documentation/networking/net_failover.rst +++ b/Documentation/networking/net_failover.rst @@ -24,3 +24,93 @@ This can be used by paravirtual drivers to enable an alternate low latency datapath. It also enables hypervisor controlled live migration of a VM with direct attached VF by failing over to the paravirtual datapath when the VF is unplugged. + +virtio-net accelerated datapath: STANDBY mode +============================================= + +net_failover enables hypervisor controlled accelerated datapath to virtio-net +enabled VMs in a transparent manner with no/minimal guest userspace chanages. + +To support this, the hypervisor needs to enable VIRTIO_NET_F_STANDBY +feature on the virtio-net interface and assign the same MAC address to both +virtio-net and VF interfaces. + +Here is an example XML snippet that shows such configuration. + + + + + + + + +
+ + + + +
+ +
+ + +Booting a VM with the above configuration will result in the following 3 +netdevs created in the VM. + +4: ens10: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff + inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10 + valid_lft 42482sec preferred_lft 42482sec + inet6 fe80::97d8:db2:8c10:b6d6/64 scope link + valid_lft forever preferred_lft forever +5: ens10nsby: mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000 + link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff +7: ens11: mtu 1500 qdisc mq master ens10 state UP group default qlen 1000 + link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff + +ens10 is the 'failover' master netdev, ens10nsby and ens11 are the slave +'standby' and 'primary' netdevs respectively. + +Live Migration of a VM with SR-IOV VF & virtio-net in STANDBY mode +================================================================== + +net_failover also enables hypervisor controlled live migration to be supported +with VMs that have direct attached SR-IOV VF devices by automatic failover to +the paravirtual datapath when the VF is unplugged. + +Here is a sample script that shows the steps to initiate live migration on +the source hypervisor. + +# cat vf_xml + + + +
+ +
+ + +# Source Hypervisor +#!/bin/bash + +DOMAIN=fedora27-tap01 +PF=enp66s0f0 +VF_NUM=5 +TAP_IF=tap01 +VF_XML= + +MAC=52:54:00:00:12:53 +ZERO_MAC=00:00:00:00:00:00 + +virsh domif-setlink $DOMAIN $TAP_IF up +bridge fdb del $MAC dev $PF master +virsh detach-device $DOMAIN $VF_XML +ip link set $PF vf $VF_NUM mac $ZERO_MAC + +virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system + +# Destination Hypervisor +#!/bin/bash + +virsh attach-device $DOMAIN $VF_XML +virsh domif-setlink $DOMAIN $TAP_IF down diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 2cdaff90a9ec..d03775100f7d 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -332,6 +332,7 @@ config VETH config VIRTIO_NET tristate "Virtio network driver" depends on VIRTIO + select NET_FAILOVER ---help--- This is the virtual network driver for virtio. It can be used with QEMU based VMMs (like KVM or Xen). Say Y or M. diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fe4534ee50a1..8f08a3e1bbaa 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -30,8 +30,11 @@ #include #include #include +#include +#include #include #include +#include static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -210,6 +213,9 @@ struct virtnet_info { u32 speed; unsigned long guest_offloads; + + /* failover when STANDBY feature enabled */ + struct failover *failover; }; struct padded_vnet_hdr { @@ -1554,6 +1560,9 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p) struct sockaddr *addr; struct scatterlist sg; + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) + return -EOPNOTSUPP; + addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); if (!addr) return -ENOMEM; @@ -2337,6 +2346,22 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct virtnet_info *vi = netdev_priv(dev); + int ret; + + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) + return -EOPNOTSUPP; + + ret = snprintf(buf, len, "sby"); + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} + static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, @@ -2354,6 +2379,7 @@ static const struct net_device_ops virtnet_netdev = { .ndo_xdp_xmit = virtnet_xdp_xmit, .ndo_xdp_flush = virtnet_xdp_flush, .ndo_features_check = passthru_features_check, + .ndo_get_phys_port_name = virtnet_get_phys_port_name, }; static void virtnet_config_changed_work(struct work_struct *work) @@ -2907,10 +2933,16 @@ static int virtnet_probe(struct virtio_device *vdev) virtnet_init_settings(dev); + if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) { + vi->failover = net_failover_create(vi->dev); + if (IS_ERR(vi->failover)) + goto free_vqs; + } + err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); - goto free_vqs; + goto free_failover; } virtio_device_ready(vdev); @@ -2947,6 +2979,8 @@ free_unregister_netdev: vi->vdev->config->reset(vdev); unregister_netdev(dev); +free_failover: + net_failover_destroy(vi->failover); free_vqs: cancel_delayed_work_sync(&vi->refill); free_receive_page_frags(vi); @@ -2981,6 +3015,8 @@ static void virtnet_remove(struct virtio_device *vdev) unregister_netdev(vi->dev); + net_failover_destroy(vi->failover); + remove_vq_common(vi); free_netdev(vi->dev); -- cgit v1.2.3 From 049ff57a2aa6ead1daf16b4d11da44ed17bdb0b5 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 24 May 2018 22:40:12 
+0200 Subject: net: phy: realtek: add suspend/resume callbacks for RTL8211B Add RTL8211B suspend / resume callbacks. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/phy/realtek.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 9f48ecf9c627..082fb40c656d 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -145,6 +145,20 @@ static int rtl8211f_config_init(struct phy_device *phydev) return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val); } +static int rtl8211b_suspend(struct phy_device *phydev) +{ + phy_write(phydev, MII_MMD_DATA, BIT(9)); + + return genphy_suspend(phydev); +} + +static int rtl8211b_resume(struct phy_device *phydev) +{ + phy_write(phydev, MII_MMD_DATA, 0); + + return genphy_resume(phydev); +} + static struct phy_driver realtek_drvs[] = { { .phy_id = 0x00008201, @@ -174,6 +188,8 @@ static struct phy_driver realtek_drvs[] = { .config_intr = &rtl8211b_config_intr, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, + .suspend = rtl8211b_suspend, + .resume = rtl8211b_resume, }, { .phy_id = 0x001cc914, .name = "RTL8211DN Gigabit Ethernet", -- cgit v1.2.3 From b1d2e4e03f92734ff524f96c4b2287133de7a4a3 Mon Sep 17 00:00:00 2001 From: Jon Maxwell Date: Fri, 25 May 2018 07:38:29 +1000 Subject: ifb: fix packets checksum Fixup the checksum for CHECKSUM_COMPLETE when pulling skbs on RX path. Otherwise we get splats when tc mirred is used to redirect packets to ifb. Before fix: nic: hw csum failure Signed-off-by: Jon Maxwell Signed-off-by: David S. Miller --- drivers/net/ifb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 5f2897ec0edc..d345c61d476c 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -102,7 +102,7 @@ static void ifb_ri_tasklet(unsigned long _txp) if (!skb->tc_from_ingress) { dev_queue_xmit(skb); } else { - skb_pull(skb, skb->mac_len); + skb_pull_rcsum(skb, skb->mac_len); netif_receive_skb(skb); } } -- cgit v1.2.3 From ceefc71d4c055dab2bba2d2bfa6e7c2855154a24 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Fri, 25 May 2018 12:40:34 +0800 Subject: ptp: rework gianfar_ptp as QorIQ common PTP driver gianfar_ptp was the PTP clock driver for 1588 timer module of Freescale QorIQ eTSEC (Enhanced Three-Speed Ethernet Controllers) platforms. Actually QorIQ DPAA (Data Path Acceleration Architecture) platforms is also using the same 1588 timer module in hardware. This patch is to rework gianfar_ptp as QorIQ common PTP driver to support both DPAA and eTSEC. Moved gianfar_ptp.c to drivers/ptp/, renamed it as ptp_qoriq.c, and renamed many variables. There were not any function changes. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/Makefile | 1 - drivers/net/ethernet/freescale/gianfar_ptp.c | 572 -------------------------- drivers/ptp/Kconfig | 14 +- drivers/ptp/Makefile | 1 + drivers/ptp/ptp_qoriq.c | 584 +++++++++++++++++++++++++++ 5 files changed, 592 insertions(+), 580 deletions(-) delete mode 100644 drivers/net/ethernet/freescale/gianfar_ptp.c create mode 100644 drivers/ptp/ptp_qoriq.c (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index ed8ad0fefbda..0914a3ea4405 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -14,7 +14,6 @@ obj-$(CONFIG_FS_ENET) += fs_enet/ obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o obj-$(CONFIG_GIANFAR) += gianfar_driver.o -obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o gianfar_driver-objs := gianfar.o \ gianfar_ethtool.o obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c deleted file mode 100644 index 9f8d4f8e57e3..000000000000 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ /dev/null @@ -1,572 +0,0 @@ -/* - * PTP 1588 clock using the eTSEC - * - * Copyright (C) 2010 OMICRON electronics GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "gianfar.h" - -/* - * gianfar ptp registers - * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010 - */ -struct gianfar_ptp_registers { - u32 tmr_ctrl; /* Timer control register */ - u32 tmr_tevent; /* Timestamp event register */ - u32 tmr_temask; /* Timer event mask register */ - u32 tmr_pevent; /* Timestamp event register */ - u32 tmr_pemask; /* Timer event mask register */ - u32 tmr_stat; /* Timestamp status register */ - u32 tmr_cnt_h; /* Timer counter high register */ - u32 tmr_cnt_l; /* Timer counter low register */ - u32 tmr_add; /* Timer drift compensation addend register */ - u32 tmr_acc; /* Timer accumulator register */ - u32 tmr_prsc; /* Timer prescale */ - u8 res1[4]; - u32 tmroff_h; /* Timer offset high */ - u32 tmroff_l; /* Timer offset low */ - u8 res2[8]; - u32 tmr_alarm1_h; /* Timer alarm 1 high register */ - u32 tmr_alarm1_l; /* Timer alarm 1 high register */ - u32 tmr_alarm2_h; /* Timer alarm 2 high register */ - u32 tmr_alarm2_l; /* Timer alarm 2 high register */ - u8 res3[48]; - u32 tmr_fiper1; /* Timer fixed period interval */ - u32 tmr_fiper2; /* Timer fixed period interval */ - u32 tmr_fiper3; /* Timer fixed period interval */ - u8 res4[20]; - u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ - u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ - u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ - u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ -}; - -/* Bit definitions for the TMR_CTRL register */ -#define ALM1P (1<<31) /* Alarm1 output polarity */ -#define ALM2P (1<<30) /* Alarm2 output polarity */ -#define FIPERST (1<<28) /* FIPER start indication */ -#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ -#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ -#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ -#define TCLK_PERIOD_MASK (0x3ff) -#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ -#define FRD (1<<14) /* FIPER Realignment Disable */ -#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ -#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ -#define ETEP2 (1<<9) /* External trigger 2 edge polarity */ -#define ETEP1 (1<<8) /* External trigger 1 edge polarity */ -#define COPH (1<<7) /* Generated clock output phase. */ -#define CIPH (1<<6) /* External oscillator input clock phase */ -#define TMSR (1<<5) /* Timer soft reset. */ -#define BYP (1<<3) /* Bypass drift compensated clock */ -#define TE (1<<2) /* 1588 timer enable. 
*/ -#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ -#define CKSEL_MASK (0x3) - -/* Bit definitions for the TMR_TEVENT register */ -#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ -#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ -#define ALM2 (1<<17) /* Current time = alarm time register 2 */ -#define ALM1 (1<<16) /* Current time = alarm time register 1 */ -#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ -#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ -#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ - -/* Bit definitions for the TMR_TEMASK register */ -#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ -#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ -#define ALM2EN (1<<17) /* Timer ALM2 event enable */ -#define ALM1EN (1<<16) /* Timer ALM1 event enable */ -#define PP1EN (1<<7) /* Periodic pulse event 1 enable */ -#define PP2EN (1<<6) /* Periodic pulse event 2 enable */ - -/* Bit definitions for the TMR_PEVENT register */ -#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */ -#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ -#define RXP (1<<0) /* PTP frame has been received */ - -/* Bit definitions for the TMR_PEMASK register */ -#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ -#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ -#define RXPEN (1<<0) /* Receive PTP packet event enable */ - -/* Bit definitions for the TMR_STAT register */ -#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ -#define STAT_VEC_MASK (0x3f) - -/* Bit definitions for the TMR_PRSC register */ -#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ -#define PRSC_OCK_MASK (0xffff) - - -#define DRIVER "gianfar_ptp" -#define DEFAULT_CKSEL 1 -#define N_EXT_TS 2 -#define REG_SIZE sizeof(struct gianfar_ptp_registers) - -struct etsects { - struct gianfar_ptp_registers __iomem *regs; - spinlock_t lock; /* protects regs */ - struct ptp_clock *clock; - struct ptp_clock_info caps; - struct resource *rsrc; - int irq; - u64 alarm_interval; /* for periodic alarm */ - u64 alarm_value; - u32 tclk_period; /* nanoseconds */ - u32 tmr_prsc; - u32 tmr_add; - u32 cksel; - u32 tmr_fiper1; - u32 tmr_fiper2; -}; - -/* - * Register access functions - */ - -/* Caller must hold etsects->lock. */ -static u64 tmr_cnt_read(struct etsects *etsects) -{ - u64 ns; - u32 lo, hi; - - lo = gfar_read(&etsects->regs->tmr_cnt_l); - hi = gfar_read(&etsects->regs->tmr_cnt_h); - ns = ((u64) hi) << 32; - ns |= lo; - return ns; -} - -/* Caller must hold etsects->lock. */ -static void tmr_cnt_write(struct etsects *etsects, u64 ns) -{ - u32 hi = ns >> 32; - u32 lo = ns & 0xffffffff; - - gfar_write(&etsects->regs->tmr_cnt_l, lo); - gfar_write(&etsects->regs->tmr_cnt_h, hi); -} - -/* Caller must hold etsects->lock. */ -static void set_alarm(struct etsects *etsects) -{ - u64 ns; - u32 lo, hi; - - ns = tmr_cnt_read(etsects) + 1500000000ULL; - ns = div_u64(ns, 1000000000UL) * 1000000000ULL; - ns -= etsects->tclk_period; - hi = ns >> 32; - lo = ns & 0xffffffff; - gfar_write(&etsects->regs->tmr_alarm1_l, lo); - gfar_write(&etsects->regs->tmr_alarm1_h, hi); -} - -/* Caller must hold etsects->lock. 
*/ -static void set_fipers(struct etsects *etsects) -{ - set_alarm(etsects); - gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); - gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); -} - -/* - * Interrupt service routine - */ - -static irqreturn_t isr(int irq, void *priv) -{ - struct etsects *etsects = priv; - struct ptp_clock_event event; - u64 ns; - u32 ack = 0, lo, hi, mask, val; - - val = gfar_read(&etsects->regs->tmr_tevent); - - if (val & ETS1) { - ack |= ETS1; - hi = gfar_read(&etsects->regs->tmr_etts1_h); - lo = gfar_read(&etsects->regs->tmr_etts1_l); - event.type = PTP_CLOCK_EXTTS; - event.index = 0; - event.timestamp = ((u64) hi) << 32; - event.timestamp |= lo; - ptp_clock_event(etsects->clock, &event); - } - - if (val & ETS2) { - ack |= ETS2; - hi = gfar_read(&etsects->regs->tmr_etts2_h); - lo = gfar_read(&etsects->regs->tmr_etts2_l); - event.type = PTP_CLOCK_EXTTS; - event.index = 1; - event.timestamp = ((u64) hi) << 32; - event.timestamp |= lo; - ptp_clock_event(etsects->clock, &event); - } - - if (val & ALM2) { - ack |= ALM2; - if (etsects->alarm_value) { - event.type = PTP_CLOCK_ALARM; - event.index = 0; - event.timestamp = etsects->alarm_value; - ptp_clock_event(etsects->clock, &event); - } - if (etsects->alarm_interval) { - ns = etsects->alarm_value + etsects->alarm_interval; - hi = ns >> 32; - lo = ns & 0xffffffff; - spin_lock(&etsects->lock); - gfar_write(&etsects->regs->tmr_alarm2_l, lo); - gfar_write(&etsects->regs->tmr_alarm2_h, hi); - spin_unlock(&etsects->lock); - etsects->alarm_value = ns; - } else { - gfar_write(&etsects->regs->tmr_tevent, ALM2); - spin_lock(&etsects->lock); - mask = gfar_read(&etsects->regs->tmr_temask); - mask &= ~ALM2EN; - gfar_write(&etsects->regs->tmr_temask, mask); - spin_unlock(&etsects->lock); - etsects->alarm_value = 0; - etsects->alarm_interval = 0; - } - } - - if (val & PP1) { - ack |= PP1; - event.type = PTP_CLOCK_PPS; - ptp_clock_event(etsects->clock, &event); - } - - if (ack) { - gfar_write(&etsects->regs->tmr_tevent, ack); - return IRQ_HANDLED; - } else - return IRQ_NONE; -} - -/* - * PTP clock operations - */ - -static int ptp_gianfar_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) -{ - u64 adj, diff; - u32 tmr_add; - int neg_adj = 0; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - if (scaled_ppm < 0) { - neg_adj = 1; - scaled_ppm = -scaled_ppm; - } - tmr_add = etsects->tmr_add; - adj = tmr_add; - - /* calculate diff as adj*(scaled_ppm/65536)/1000000 - * and round() to the nearest integer - */ - adj *= scaled_ppm; - diff = div_u64(adj, 8000000); - diff = (diff >> 13) + ((diff >> 12) & 1); - - tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; - - gfar_write(&etsects->regs->tmr_add, tmr_add); - - return 0; -} - -static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) -{ - s64 now; - unsigned long flags; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - spin_lock_irqsave(&etsects->lock, flags); - - now = tmr_cnt_read(etsects); - now += delta; - tmr_cnt_write(etsects, now); - set_fipers(etsects); - - spin_unlock_irqrestore(&etsects->lock, flags); - - return 0; -} - -static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) -{ - u64 ns; - unsigned long flags; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - spin_lock_irqsave(&etsects->lock, flags); - - ns = tmr_cnt_read(etsects); - - spin_unlock_irqrestore(&etsects->lock, flags); - - *ts = ns_to_timespec64(ns); - - return 0; -} - -static int ptp_gianfar_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - u64 ns; - unsigned long flags; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - ns = timespec64_to_ns(ts); - - spin_lock_irqsave(&etsects->lock, flags); - - tmr_cnt_write(etsects, ns); - set_fipers(etsects); - - spin_unlock_irqrestore(&etsects->lock, flags); - - return 0; -} - -static int ptp_gianfar_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) -{ - struct etsects *etsects = container_of(ptp, struct etsects, caps); - unsigned long flags; - u32 bit, mask; - - switch (rq->type) { - case PTP_CLK_REQ_EXTTS: - switch (rq->extts.index) { - case 0: - bit = ETS1EN; - break; - case 1: - bit = ETS2EN; - break; - default: - return -EINVAL; - } - spin_lock_irqsave(&etsects->lock, flags); - mask = gfar_read(&etsects->regs->tmr_temask); - if (on) - mask |= bit; - else - mask &= ~bit; - gfar_write(&etsects->regs->tmr_temask, mask); - spin_unlock_irqrestore(&etsects->lock, flags); - return 0; - - case PTP_CLK_REQ_PPS: - spin_lock_irqsave(&etsects->lock, flags); - mask = gfar_read(&etsects->regs->tmr_temask); - if (on) - mask |= PP1EN; - else - mask &= ~PP1EN; - gfar_write(&etsects->regs->tmr_temask, mask); - spin_unlock_irqrestore(&etsects->lock, flags); - return 0; - - default: - break; - } - - return -EOPNOTSUPP; -} - -static const struct ptp_clock_info ptp_gianfar_caps = { - .owner = THIS_MODULE, - .name = "gianfar clock", - .max_adj = 512000, - .n_alarm = 0, - .n_ext_ts = N_EXT_TS, - .n_per_out = 0, - .n_pins = 0, - .pps = 1, - .adjfine = ptp_gianfar_adjfine, - .adjtime = ptp_gianfar_adjtime, - .gettime64 = ptp_gianfar_gettime, - .settime64 = ptp_gianfar_settime, - .enable = ptp_gianfar_enable, -}; - -static int gianfar_ptp_probe(struct platform_device *dev) -{ - struct device_node *node = dev->dev.of_node; - struct etsects *etsects; - struct timespec64 now; - int err = -ENOMEM; - u32 tmr_ctrl; - unsigned long flags; - - etsects = kzalloc(sizeof(*etsects), GFP_KERNEL); - if (!etsects) - goto no_memory; - - err = -ENODEV; - - etsects->caps = ptp_gianfar_caps; - - if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel)) - etsects->cksel = DEFAULT_CKSEL; - - if (of_property_read_u32(node, - "fsl,tclk-period", &etsects->tclk_period) || - of_property_read_u32(node, - "fsl,tmr-prsc", &etsects->tmr_prsc) || - of_property_read_u32(node, - "fsl,tmr-add", &etsects->tmr_add) || - of_property_read_u32(node, - "fsl,tmr-fiper1", &etsects->tmr_fiper1) || - of_property_read_u32(node, - "fsl,tmr-fiper2", &etsects->tmr_fiper2) || - of_property_read_u32(node, - "fsl,max-adj", &etsects->caps.max_adj)) { - 
pr_err("device tree node missing required elements\n"); - goto no_node; - } - - etsects->irq = platform_get_irq(dev, 0); - - if (etsects->irq < 0) { - pr_err("irq not in device tree\n"); - goto no_node; - } - if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) { - pr_err("request_irq failed\n"); - goto no_node; - } - - etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!etsects->rsrc) { - pr_err("no resource\n"); - goto no_resource; - } - if (request_resource(&iomem_resource, etsects->rsrc)) { - pr_err("resource busy\n"); - goto no_resource; - } - - spin_lock_init(&etsects->lock); - - etsects->regs = ioremap(etsects->rsrc->start, - resource_size(etsects->rsrc)); - if (!etsects->regs) { - pr_err("ioremap ptp registers failed\n"); - goto no_ioremap; - } - getnstimeofday64(&now); - ptp_gianfar_settime(&etsects->caps, &now); - - tmr_ctrl = - (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | - (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT; - - spin_lock_irqsave(&etsects->lock, flags); - - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl); - gfar_write(&etsects->regs->tmr_add, etsects->tmr_add); - gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); - gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); - gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); - set_alarm(etsects); - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); - - spin_unlock_irqrestore(&etsects->lock, flags); - - etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev); - if (IS_ERR(etsects->clock)) { - err = PTR_ERR(etsects->clock); - goto no_clock; - } - gfar_phc_index = ptp_clock_index(etsects->clock); - - platform_set_drvdata(dev, etsects); - - return 0; - -no_clock: - iounmap(etsects->regs); -no_ioremap: - release_resource(etsects->rsrc); -no_resource: - free_irq(etsects->irq, etsects); -no_node: - kfree(etsects); -no_memory: - return err; -} - -static int gianfar_ptp_remove(struct platform_device *dev) -{ - struct etsects *etsects = platform_get_drvdata(dev); - - gfar_write(&etsects->regs->tmr_temask, 0); - gfar_write(&etsects->regs->tmr_ctrl, 0); - - gfar_phc_index = -1; - ptp_clock_unregister(etsects->clock); - iounmap(etsects->regs); - release_resource(etsects->rsrc); - free_irq(etsects->irq, etsects); - kfree(etsects); - - return 0; -} - -static const struct of_device_id match_table[] = { - { .compatible = "fsl,etsec-ptp" }, - {}, -}; -MODULE_DEVICE_TABLE(of, match_table); - -static struct platform_driver gianfar_ptp_driver = { - .driver = { - .name = "gianfar_ptp", - .of_match_table = match_table, - }, - .probe = gianfar_ptp_probe, - .remove = gianfar_ptp_remove, -}; - -module_platform_driver(gianfar_ptp_driver); - -MODULE_AUTHOR("Richard Cochran "); -MODULE_DESCRIPTION("PTP clock using the eTSEC"); -MODULE_LICENSE("GPL"); diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index a21ad10d613c..474c988d2e95 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -41,19 +41,19 @@ config PTP_1588_CLOCK_DTE To compile this driver as a module, choose M here: the module will be called ptp_dte. -config PTP_1588_CLOCK_GIANFAR - tristate "Freescale eTSEC as PTP clock" +config PTP_1588_CLOCK_QORIQ + tristate "Freescale QorIQ 1588 timer as PTP clock" depends on GIANFAR depends on PTP_1588_CLOCK default y help - This driver adds support for using the eTSEC as a PTP - clock. This clock is only useful if your PTP programs are - getting hardware time stamps on the PTP Ethernet packets - using the SO_TIMESTAMPING API. 
+ This driver adds support for using the Freescale QorIQ 1588 + timer as a PTP clock. This clock is only useful if your PTP + programs are getting hardware time stamps on the PTP Ethernet + packets using the SO_TIMESTAMPING API. To compile this driver as a module, choose M here: the module - will be called gianfar_ptp. + will be called ptp_qoriq. config PTP_1588_CLOCK_IXP46X tristate "Intel IXP46x as PTP clock" diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index fd28207f5379..19efa9cfa950 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o +obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp_qoriq.o diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c new file mode 100644 index 000000000000..5110cce78fb5 --- /dev/null +++ b/drivers/ptp/ptp_qoriq.c @@ -0,0 +1,584 @@ +/* + * PTP 1588 clock for Freescale QorIQ 1588 timer + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * qoriq ptp registers + * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010 + */ +struct qoriq_ptp_registers { + u32 tmr_ctrl; /* Timer control register */ + u32 tmr_tevent; /* Timestamp event register */ + u32 tmr_temask; /* Timer event mask register */ + u32 tmr_pevent; /* Timestamp event register */ + u32 tmr_pemask; /* Timer event mask register */ + u32 tmr_stat; /* Timestamp status register */ + u32 tmr_cnt_h; /* Timer counter high register */ + u32 tmr_cnt_l; /* Timer counter low register */ + u32 tmr_add; /* Timer drift compensation addend register */ + u32 tmr_acc; /* Timer accumulator register */ + u32 tmr_prsc; /* Timer prescale */ + u8 res1[4]; + u32 tmroff_h; /* Timer offset high */ + u32 tmroff_l; /* Timer offset low */ + u8 res2[8]; + u32 tmr_alarm1_h; /* Timer alarm 1 high register */ + u32 tmr_alarm1_l; /* Timer alarm 1 high register */ + u32 tmr_alarm2_h; /* Timer alarm 2 high register */ + u32 tmr_alarm2_l; /* Timer alarm 2 high register */ + u8 res3[48]; + u32 tmr_fiper1; /* Timer fixed period interval */ + u32 tmr_fiper2; /* Timer fixed period interval */ + u32 tmr_fiper3; /* Timer fixed period interval */ + u8 res4[20]; + u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ + u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ + u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ + u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ +}; + +/* Bit definitions for the TMR_CTRL register */ +#define ALM1P (1<<31) /* Alarm1 output polarity */ +#define ALM2P (1<<30) /* Alarm2 output polarity */ +#define FIPERST (1<<28) /* FIPER start indication */ +#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ +#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ +#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ +#define TCLK_PERIOD_MASK (0x3ff) +#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ +#define FRD (1<<14) /* FIPER Realignment Disable */ +#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ +#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ +#define ETEP2 (1<<9) /* External trigger 2 edge polarity */ +#define ETEP1 (1<<8) /* External trigger 1 edge polarity */ +#define COPH (1<<7) /* Generated clock output phase. */ +#define CIPH (1<<6) /* External oscillator input clock phase */ +#define TMSR (1<<5) /* Timer soft reset. */ +#define BYP (1<<3) /* Bypass drift compensated clock */ +#define TE (1<<2) /* 1588 timer enable. 
*/ +#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ +#define CKSEL_MASK (0x3) + +/* Bit definitions for the TMR_TEVENT register */ +#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ +#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ +#define ALM2 (1<<17) /* Current time = alarm time register 2 */ +#define ALM1 (1<<16) /* Current time = alarm time register 1 */ +#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ +#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ +#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ + +/* Bit definitions for the TMR_TEMASK register */ +#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ +#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ +#define ALM2EN (1<<17) /* Timer ALM2 event enable */ +#define ALM1EN (1<<16) /* Timer ALM1 event enable */ +#define PP1EN (1<<7) /* Periodic pulse event 1 enable */ +#define PP2EN (1<<6) /* Periodic pulse event 2 enable */ + +/* Bit definitions for the TMR_PEVENT register */ +#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */ +#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ +#define RXP (1<<0) /* PTP frame has been received */ + +/* Bit definitions for the TMR_PEMASK register */ +#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ +#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ +#define RXPEN (1<<0) /* Receive PTP packet event enable */ + +/* Bit definitions for the TMR_STAT register */ +#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ +#define STAT_VEC_MASK (0x3f) + +/* Bit definitions for the TMR_PRSC register */ +#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ +#define PRSC_OCK_MASK (0xffff) + + +#define DRIVER "ptp_qoriq" +#define DEFAULT_CKSEL 1 +#define N_EXT_TS 2 +#define REG_SIZE sizeof(struct qoriq_ptp_registers) + +struct qoriq_ptp { + struct qoriq_ptp_registers __iomem *regs; + spinlock_t lock; /* protects regs */ + struct ptp_clock *clock; + struct ptp_clock_info caps; + struct resource *rsrc; + int irq; + int phc_index; + u64 alarm_interval; /* for periodic alarm */ + u64 alarm_value; + u32 tclk_period; /* nanoseconds */ + u32 tmr_prsc; + u32 tmr_add; + u32 cksel; + u32 tmr_fiper1; + u32 tmr_fiper2; +}; + +static inline u32 qoriq_read(unsigned __iomem *addr) +{ + u32 val; + + val = ioread32be(addr); + return val; +} + +static inline void qoriq_write(unsigned __iomem *addr, u32 val) +{ + iowrite32be(val, addr); +} + +/* + * Register access functions + */ + +/* Caller must hold qoriq_ptp->lock. */ +static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp) +{ + u64 ns; + u32 lo, hi; + + lo = qoriq_read(&qoriq_ptp->regs->tmr_cnt_l); + hi = qoriq_read(&qoriq_ptp->regs->tmr_cnt_h); + ns = ((u64) hi) << 32; + ns |= lo; + return ns; +} + +/* Caller must hold qoriq_ptp->lock. */ +static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns) +{ + u32 hi = ns >> 32; + u32 lo = ns & 0xffffffff; + + qoriq_write(&qoriq_ptp->regs->tmr_cnt_l, lo); + qoriq_write(&qoriq_ptp->regs->tmr_cnt_h, hi); +} + +/* Caller must hold qoriq_ptp->lock. */ +static void set_alarm(struct qoriq_ptp *qoriq_ptp) +{ + u64 ns; + u32 lo, hi; + + ns = tmr_cnt_read(qoriq_ptp) + 1500000000ULL; + ns = div_u64(ns, 1000000000UL) * 1000000000ULL; + ns -= qoriq_ptp->tclk_period; + hi = ns >> 32; + lo = ns & 0xffffffff; + qoriq_write(&qoriq_ptp->regs->tmr_alarm1_l, lo); + qoriq_write(&qoriq_ptp->regs->tmr_alarm1_h, hi); +} + +/* Caller must hold qoriq_ptp->lock. 
*/ +static void set_fipers(struct qoriq_ptp *qoriq_ptp) +{ + set_alarm(qoriq_ptp); + qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); + qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); +} + +/* + * Interrupt service routine + */ + +static irqreturn_t isr(int irq, void *priv) +{ + struct qoriq_ptp *qoriq_ptp = priv; + struct ptp_clock_event event; + u64 ns; + u32 ack = 0, lo, hi, mask, val; + + val = qoriq_read(&qoriq_ptp->regs->tmr_tevent); + + if (val & ETS1) { + ack |= ETS1; + hi = qoriq_read(&qoriq_ptp->regs->tmr_etts1_h); + lo = qoriq_read(&qoriq_ptp->regs->tmr_etts1_l); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + ptp_clock_event(qoriq_ptp->clock, &event); + } + + if (val & ETS2) { + ack |= ETS2; + hi = qoriq_read(&qoriq_ptp->regs->tmr_etts2_h); + lo = qoriq_read(&qoriq_ptp->regs->tmr_etts2_l); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + ptp_clock_event(qoriq_ptp->clock, &event); + } + + if (val & ALM2) { + ack |= ALM2; + if (qoriq_ptp->alarm_value) { + event.type = PTP_CLOCK_ALARM; + event.index = 0; + event.timestamp = qoriq_ptp->alarm_value; + ptp_clock_event(qoriq_ptp->clock, &event); + } + if (qoriq_ptp->alarm_interval) { + ns = qoriq_ptp->alarm_value + qoriq_ptp->alarm_interval; + hi = ns >> 32; + lo = ns & 0xffffffff; + spin_lock(&qoriq_ptp->lock); + qoriq_write(&qoriq_ptp->regs->tmr_alarm2_l, lo); + qoriq_write(&qoriq_ptp->regs->tmr_alarm2_h, hi); + spin_unlock(&qoriq_ptp->lock); + qoriq_ptp->alarm_value = ns; + } else { + qoriq_write(&qoriq_ptp->regs->tmr_tevent, ALM2); + spin_lock(&qoriq_ptp->lock); + mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + mask &= ~ALM2EN; + qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + spin_unlock(&qoriq_ptp->lock); + qoriq_ptp->alarm_value = 0; + qoriq_ptp->alarm_interval = 0; + } + } + + if (val & PP1) { + ack |= PP1; + event.type = PTP_CLOCK_PPS; + ptp_clock_event(qoriq_ptp->clock, &event); + } + + if (ack) { + qoriq_write(&qoriq_ptp->regs->tmr_tevent, ack); + return IRQ_HANDLED; + } else + return IRQ_NONE; +} + +/* + * PTP clock operations + */ + +static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + u64 adj, diff; + u32 tmr_add; + int neg_adj = 0; + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + tmr_add = qoriq_ptp->tmr_add; + adj = tmr_add; + + /* calculate diff as adj*(scaled_ppm/65536)/1000000 + * and round() to the nearest integer + */ + adj *= scaled_ppm; + diff = div_u64(adj, 8000000); + diff = (diff >> 13) + ((diff >> 12) & 1); + + tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; + + qoriq_write(&qoriq_ptp->regs->tmr_add, tmr_add); + + return 0; +} + +static int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + s64 now; + unsigned long flags; + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + + spin_lock_irqsave(&qoriq_ptp->lock, flags); + + now = tmr_cnt_read(qoriq_ptp); + now += delta; + tmr_cnt_write(qoriq_ptp, now); + set_fipers(qoriq_ptp); + + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + + return 0; +} + +static int ptp_qoriq_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + + spin_lock_irqsave(&qoriq_ptp->lock, flags); + + ns = tmr_cnt_read(qoriq_ptp); + + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int ptp_qoriq_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&qoriq_ptp->lock, flags); + + tmr_cnt_write(qoriq_ptp, ns); + set_fipers(qoriq_ptp); + + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + + return 0; +} + +static int ptp_qoriq_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + unsigned long flags; + u32 bit, mask; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + switch (rq->extts.index) { + case 0: + bit = ETS1EN; + break; + case 1: + bit = ETS2EN; + break; + default: + return -EINVAL; + } + spin_lock_irqsave(&qoriq_ptp->lock, flags); + mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + if (on) + mask |= bit; + else + mask &= ~bit; + qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&qoriq_ptp->lock, flags); + mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + if (on) + mask |= PP1EN; + else + mask &= ~PP1EN; + qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static const struct ptp_clock_info ptp_qoriq_caps = { + .owner = THIS_MODULE, + .name = "qoriq ptp clock", + .max_adj = 512000, + .n_alarm = 0, + .n_ext_ts = N_EXT_TS, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfine = ptp_qoriq_adjfine, + .adjtime = ptp_qoriq_adjtime, + .gettime64 = ptp_qoriq_gettime, + .settime64 = ptp_qoriq_settime, + .enable = ptp_qoriq_enable, +}; + +static int qoriq_ptp_probe(struct platform_device *dev) +{ + struct device_node *node = dev->dev.of_node; + struct qoriq_ptp *qoriq_ptp; + struct timespec64 now; + int err = -ENOMEM; + u32 tmr_ctrl; + unsigned long flags; + + qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL); + if (!qoriq_ptp) + goto no_memory; + + err = -ENODEV; + + qoriq_ptp->caps = ptp_qoriq_caps; + + if (of_property_read_u32(node, "fsl,cksel", &qoriq_ptp->cksel)) + qoriq_ptp->cksel = DEFAULT_CKSEL; + + if (of_property_read_u32(node, + "fsl,tclk-period", &qoriq_ptp->tclk_period) || + of_property_read_u32(node, + "fsl,tmr-prsc", &qoriq_ptp->tmr_prsc) || + of_property_read_u32(node, + "fsl,tmr-add", &qoriq_ptp->tmr_add) || + of_property_read_u32(node, + "fsl,tmr-fiper1", &qoriq_ptp->tmr_fiper1) || + of_property_read_u32(node, + "fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) || + 
of_property_read_u32(node, + "fsl,max-adj", &qoriq_ptp->caps.max_adj)) { + pr_err("device tree node missing required elements\n"); + goto no_node; + } + + qoriq_ptp->irq = platform_get_irq(dev, 0); + + if (qoriq_ptp->irq < 0) { + pr_err("irq not in device tree\n"); + goto no_node; + } + if (request_irq(qoriq_ptp->irq, isr, 0, DRIVER, qoriq_ptp)) { + pr_err("request_irq failed\n"); + goto no_node; + } + + qoriq_ptp->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!qoriq_ptp->rsrc) { + pr_err("no resource\n"); + goto no_resource; + } + if (request_resource(&iomem_resource, qoriq_ptp->rsrc)) { + pr_err("resource busy\n"); + goto no_resource; + } + + spin_lock_init(&qoriq_ptp->lock); + + qoriq_ptp->regs = ioremap(qoriq_ptp->rsrc->start, + resource_size(qoriq_ptp->rsrc)); + if (!qoriq_ptp->regs) { + pr_err("ioremap ptp registers failed\n"); + goto no_ioremap; + } + getnstimeofday64(&now); + ptp_qoriq_settime(&qoriq_ptp->caps, &now); + + tmr_ctrl = + (qoriq_ptp->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | + (qoriq_ptp->cksel & CKSEL_MASK) << CKSEL_SHIFT; + + spin_lock_irqsave(&qoriq_ptp->lock, flags); + + qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl); + qoriq_write(&qoriq_ptp->regs->tmr_add, qoriq_ptp->tmr_add); + qoriq_write(&qoriq_ptp->regs->tmr_prsc, qoriq_ptp->tmr_prsc); + qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); + qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); + set_alarm(qoriq_ptp); + qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); + + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + + qoriq_ptp->clock = ptp_clock_register(&qoriq_ptp->caps, &dev->dev); + if (IS_ERR(qoriq_ptp->clock)) { + err = PTR_ERR(qoriq_ptp->clock); + goto no_clock; + } + qoriq_ptp->phc_index = ptp_clock_index(qoriq_ptp->clock); + + platform_set_drvdata(dev, qoriq_ptp); + + return 0; + +no_clock: + iounmap(qoriq_ptp->regs); +no_ioremap: + release_resource(qoriq_ptp->rsrc); +no_resource: + free_irq(qoriq_ptp->irq, qoriq_ptp); +no_node: + kfree(qoriq_ptp); +no_memory: + return err; +} + +static int qoriq_ptp_remove(struct platform_device *dev) +{ + struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev); + + qoriq_write(&qoriq_ptp->regs->tmr_temask, 0); + qoriq_write(&qoriq_ptp->regs->tmr_ctrl, 0); + + ptp_clock_unregister(qoriq_ptp->clock); + iounmap(qoriq_ptp->regs); + release_resource(qoriq_ptp->rsrc); + free_irq(qoriq_ptp->irq, qoriq_ptp); + kfree(qoriq_ptp); + + return 0; +} + +static const struct of_device_id match_table[] = { + { .compatible = "fsl,etsec-ptp" }, + {}, +}; +MODULE_DEVICE_TABLE(of, match_table); + +static struct platform_driver qoriq_ptp_driver = { + .driver = { + .name = "ptp_qoriq", + .of_match_table = match_table, + }, + .probe = qoriq_ptp_probe, + .remove = qoriq_ptp_remove, +}; + +module_platform_driver(qoriq_ptp_driver); + +MODULE_AUTHOR("Richard Cochran "); +MODULE_DESCRIPTION("PTP clock for Freescale QorIQ 1588 timer"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 7349a74ea75ca27606ead81df3ed67f1b32a94ba Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Fri, 25 May 2018 12:40:36 +0800 Subject: net: ethernet: gianfar_ethtool: get phc index through drvdata Global variable gfar_phc_index was used to get and store phc index through gianfar_ptp driver. However gianfar_ptp had been renamed as ptp_qoriq for QorIQ common PTP driver. This gfar_phc_index doesn't work any more, and the phc index is stored in drvdata now. This patch is to support getting phc index through ptp_qoriq drvdata. 
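For illustration only, not part of the patch: a minimal sketch of the consumer-side lookup this change adds to gfar_get_ts_info(). The header location for struct qoriq_ptp and the helper name are assumptions; the calls themselves mirror the ones used in the diff below.

/* Sketch: resolve the QorIQ 1588 timer's PHC index from its drvdata. */
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/fsl/ptp_qoriq.h>	/* assumed header providing struct qoriq_ptp */

static int example_get_qoriq_phc_index(void)
{
	struct device_node *ptp_node;
	struct platform_device *ptp_dev;
	struct qoriq_ptp *ptp = NULL;

	/* Find the timer node the ptp_qoriq driver binds against. */
	ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node);
		if (ptp_dev)
			ptp = platform_get_drvdata(ptp_dev);
		of_node_put(ptp_node);
	}

	/* -1 follows the ethtool convention for "no PHC available". */
	return ptp ? ptp->phc_index : -1;
}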
Signed-off-by: Yangbo Lu Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/gianfar.h | 3 --- drivers/net/ethernet/freescale/gianfar_ethtool.c | 23 ++++++++++++++++++----- 2 files changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 5aa814799d70..8e42c0246611 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1372,7 +1372,4 @@ struct filer_table { struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; }; -/* The gianfar_ptp module will set this variable */ -extern int gfar_phc_index; - #endif /* __GIANFAR_H */ diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index a93e0199c369..8cb98cae0a6f 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -41,6 +41,8 @@ #include #include #include +#include +#include #include "gianfar.h" @@ -1509,24 +1511,35 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -int gfar_phc_index = -1; -EXPORT_SYMBOL(gfar_phc_index); - static int gfar_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { struct gfar_private *priv = netdev_priv(dev); + struct platform_device *ptp_dev; + struct device_node *ptp_node; + struct qoriq_ptp *ptp = NULL; + + info->phc_index = -1; if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; return 0; } + + ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); + if (ptp_node) { + ptp_dev = of_find_device_by_node(ptp_node); + if (ptp_dev) + ptp = platform_get_drvdata(ptp_dev); + } + + if (ptp) + info->phc_index = ptp->phc_index; + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = gfar_phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | -- cgit v1.2.3 From 6528e02cc9ff7a195e2f81fd422458cefa83ad10 Mon Sep 17 00:00:00 2001 From: Christophe Roullier Date: Fri, 25 May 2018 09:46:38 +0200 Subject: net: ethernet: stmmac: add adaptation for stm32mp157c. Glue codes to support stm32mp157c device and stay compatible with stm32 mcu familly Signed-off-by: Christophe Roullier Acked-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 267 ++++++++++++++++++++-- 1 file changed, 252 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 9e6db16af663..7e2e79dedebf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -16,49 +16,180 @@ #include #include #include +#include #include #include #include #include "stmmac_platform.h" -#define MII_PHY_SEL_MASK BIT(23) +#define SYSCFG_MCU_ETH_MASK BIT(23) +#define SYSCFG_MP1_ETH_MASK GENMASK(23, 16) + +#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16) +#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17) +#define SYSCFG_PMCR_ETH_SEL_MII BIT(20) +#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21) +#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23) +#define SYSCFG_PMCR_ETH_SEL_GMII 0 +#define SYSCFG_MCU_ETH_SEL_MII 0 +#define SYSCFG_MCU_ETH_SEL_RMII 1 struct stm32_dwmac { struct clk *clk_tx; struct clk *clk_rx; + struct clk *clk_eth_ck; + struct clk *clk_ethstp; + struct clk *syscfg_clk; + bool int_phyclk; /* Clock from RCC to drive PHY */ u32 mode_reg; /* MAC glue-logic mode register */ struct regmap *regmap; u32 speed; + const struct stm32_ops *ops; + struct device *dev; +}; + +struct stm32_ops { + int (*set_mode)(struct plat_stmmacenet_data *plat_dat); + int (*clk_prepare)(struct stm32_dwmac *dwmac, bool prepare); + int (*suspend)(struct stm32_dwmac *dwmac); + void (*resume)(struct stm32_dwmac *dwmac); + int (*parse_data)(struct stm32_dwmac *dwmac, + struct device *dev); + u32 syscfg_eth_mask; }; static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; - u32 reg = dwmac->mode_reg; - u32 val; int ret; - val = (plat_dat->interface == PHY_INTERFACE_MODE_MII) ? 
0 : 1; - ret = regmap_update_bits(dwmac->regmap, reg, MII_PHY_SEL_MASK, val); - if (ret) - return ret; + if (dwmac->ops->set_mode) { + ret = dwmac->ops->set_mode(plat_dat); + if (ret) + return ret; + } ret = clk_prepare_enable(dwmac->clk_tx); if (ret) return ret; - ret = clk_prepare_enable(dwmac->clk_rx); - if (ret) - clk_disable_unprepare(dwmac->clk_tx); + if (!dwmac->dev->power.is_suspended) { + ret = clk_prepare_enable(dwmac->clk_rx); + if (ret) { + clk_disable_unprepare(dwmac->clk_tx); + return ret; + } + } + + if (dwmac->ops->clk_prepare) { + ret = dwmac->ops->clk_prepare(dwmac, true); + if (ret) { + clk_disable_unprepare(dwmac->clk_rx); + clk_disable_unprepare(dwmac->clk_tx); + } + } return ret; } +static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare) +{ + int ret = 0; + + if (prepare) { + ret = clk_prepare_enable(dwmac->syscfg_clk); + if (ret) + return ret; + + if (dwmac->int_phyclk) { + ret = clk_prepare_enable(dwmac->clk_eth_ck); + if (ret) { + clk_disable_unprepare(dwmac->syscfg_clk); + return ret; + } + } + } else { + clk_disable_unprepare(dwmac->syscfg_clk); + if (dwmac->int_phyclk) + clk_disable_unprepare(dwmac->clk_eth_ck); + } + return ret; +} + +static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val; + + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_MII: + val = SYSCFG_PMCR_ETH_SEL_MII; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); + break; + case PHY_INTERFACE_MODE_GMII: + val = SYSCFG_PMCR_ETH_SEL_GMII; + if (dwmac->int_phyclk) + val |= SYSCFG_PMCR_ETH_CLK_SEL; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n"); + break; + case PHY_INTERFACE_MODE_RMII: + val = SYSCFG_PMCR_ETH_SEL_RMII; + if (dwmac->int_phyclk) + val |= SYSCFG_PMCR_ETH_REF_CLK_SEL; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); + break; + case PHY_INTERFACE_MODE_RGMII: + val = SYSCFG_PMCR_ETH_SEL_RGMII; + if (dwmac->int_phyclk) + val |= SYSCFG_PMCR_ETH_CLK_SEL; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n"); + break; + default: + pr_debug("SYSCFG init : Do not manage %d interface\n", + plat_dat->interface); + /* Do not manage others interfaces */ + return -EINVAL; + } + + return regmap_update_bits(dwmac->regmap, reg, + dwmac->ops->syscfg_eth_mask, val); +} + +static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val; + + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_MII: + val = SYSCFG_MCU_ETH_SEL_MII; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); + break; + case PHY_INTERFACE_MODE_RMII: + val = SYSCFG_MCU_ETH_SEL_RMII; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); + break; + default: + pr_debug("SYSCFG init : Do not manage %d interface\n", + plat_dat->interface); + /* Do not manage others interfaces */ + return -EINVAL; + } + + return regmap_update_bits(dwmac->regmap, reg, + dwmac->ops->syscfg_eth_mask, val); +} + static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac) { clk_disable_unprepare(dwmac->clk_tx); clk_disable_unprepare(dwmac->clk_rx); + + if (dwmac->ops->clk_prepare) + dwmac->ops->clk_prepare(dwmac, false); } static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, @@ -70,15 +201,22 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, /* Get TX/RX clocks */ dwmac->clk_tx = devm_clk_get(dev, "mac-clk-tx"); if (IS_ERR(dwmac->clk_tx)) { - dev_err(dev, "No tx clock provided...\n"); + 
dev_err(dev, "No ETH Tx clock provided...\n"); return PTR_ERR(dwmac->clk_tx); } + dwmac->clk_rx = devm_clk_get(dev, "mac-clk-rx"); if (IS_ERR(dwmac->clk_rx)) { - dev_err(dev, "No rx clock provided...\n"); + dev_err(dev, "No ETH Rx clock provided...\n"); return PTR_ERR(dwmac->clk_rx); } + if (dwmac->ops->parse_data) { + err = dwmac->ops->parse_data(dwmac, dev); + if (err) + return err; + } + /* Get mode register */ dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); if (IS_ERR(dwmac->regmap)) @@ -91,11 +229,46 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, return err; } +static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, + struct device *dev) +{ + struct device_node *np = dev->of_node; + + dwmac->int_phyclk = of_property_read_bool(np, "st,int-phyclk"); + + /* Check if internal clk from RCC selected */ + if (dwmac->int_phyclk) { + /* Get ETH_CLK clocks */ + dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck"); + if (IS_ERR(dwmac->clk_eth_ck)) { + dev_err(dev, "No ETH CK clock provided...\n"); + return PTR_ERR(dwmac->clk_eth_ck); + } + } + + /* Clock used for low power mode */ + dwmac->clk_ethstp = devm_clk_get(dev, "ethstp"); + if (IS_ERR(dwmac->clk_ethstp)) { + dev_err(dev, "No ETH peripheral clock provided for CStop mode ...\n"); + return PTR_ERR(dwmac->clk_ethstp); + } + + /* Clock for sysconfig */ + dwmac->syscfg_clk = devm_clk_get(dev, "syscfg-clk"); + if (IS_ERR(dwmac->syscfg_clk)) { + dev_err(dev, "No syscfg clock provided...\n"); + return PTR_ERR(dwmac->syscfg_clk); + } + + return 0; +} + static int stm32_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; struct stm32_dwmac *dwmac; + const struct stm32_ops *data; int ret; ret = stmmac_get_platform_resources(pdev, &stmmac_res); @@ -112,6 +285,16 @@ static int stm32_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + data = of_device_get_match_data(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, "no of match data provided\n"); + ret = -EINVAL; + goto err_remove_config_dt; + } + + dwmac->ops = data; + dwmac->dev = &pdev->dev; + ret = stm32_dwmac_parse_data(dwmac, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); @@ -149,15 +332,48 @@ static int stm32_dwmac_remove(struct platform_device *pdev) return ret; } +static int stm32mp1_suspend(struct stm32_dwmac *dwmac) +{ + int ret = 0; + + ret = clk_prepare_enable(dwmac->clk_ethstp); + if (ret) + return ret; + + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->syscfg_clk); + if (dwmac->int_phyclk) + clk_disable_unprepare(dwmac->clk_eth_ck); + + return ret; +} + +static void stm32mp1_resume(struct stm32_dwmac *dwmac) +{ + clk_disable_unprepare(dwmac->clk_ethstp); +} + +static int stm32mcu_suspend(struct stm32_dwmac *dwmac) +{ + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->clk_rx); + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int stm32_dwmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); + struct stm32_dwmac *dwmac = priv->plat->bsp_priv; + int ret; ret = stmmac_suspend(dev); - stm32_dwmac_clk_disable(priv->plat->bsp_priv); + + if (dwmac->ops->suspend) + ret = dwmac->ops->suspend(dwmac); return ret; } @@ -166,8 +382,12 @@ static int stm32_dwmac_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); + struct stm32_dwmac *dwmac = priv->plat->bsp_priv; int ret; + 
if (dwmac->ops->resume) + dwmac->ops->resume(dwmac); + ret = stm32_dwmac_init(priv->plat); if (ret) return ret; @@ -181,8 +401,24 @@ static int stm32_dwmac_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops, stm32_dwmac_suspend, stm32_dwmac_resume); +static struct stm32_ops stm32mcu_dwmac_data = { + .set_mode = stm32mcu_set_mode, + .suspend = stm32mcu_suspend, + .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK +}; + +static struct stm32_ops stm32mp1_dwmac_data = { + .set_mode = stm32mp1_set_mode, + .clk_prepare = stm32mp1_clk_prepare, + .suspend = stm32mp1_suspend, + .resume = stm32mp1_resume, + .parse_data = stm32mp1_parse_data, + .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK +}; + static const struct of_device_id stm32_dwmac_match[] = { - { .compatible = "st,stm32-dwmac"}, + { .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data}, + { .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data}, { } }; MODULE_DEVICE_TABLE(of, stm32_dwmac_match); @@ -199,5 +435,6 @@ static struct platform_driver stm32_dwmac_driver = { module_platform_driver(stm32_dwmac_driver); MODULE_AUTHOR("Alexandre Torgue "); -MODULE_DESCRIPTION("STMicroelectronics MCU DWMAC Specific Glue layer"); +MODULE_AUTHOR("Christophe Roullier "); +MODULE_DESCRIPTION("STMicroelectronics STM32 DWMAC Specific Glue layer"); MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 026e57585ff1598499bba119fe57306b83011f95 Mon Sep 17 00:00:00 2001 From: Christophe Roullier Date: Fri, 25 May 2018 09:46:40 +0200 Subject: net: stmmac: add dwmac-4.20a compatible Manage dwmac-4.20a version from synopsys Signed-off-by: Christophe Roullier Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index ebd3e5ffa73c..6d141f3931eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -472,7 +472,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) } if (of_device_is_compatible(np, "snps,dwmac-4.00") || - of_device_is_compatible(np, "snps,dwmac-4.10a")) { + of_device_is_compatible(np, "snps,dwmac-4.10a") || + of_device_is_compatible(np, "snps,dwmac-4.20a")) { plat->has_gmac4 = 1; plat->has_gmac = 0; plat->pmt = 1; -- cgit v1.2.3 From 102cd909635612c0be784a519651954a7924c786 Mon Sep 17 00:00:00 2001 From: Bjørn Mork Date: Fri, 25 May 2018 15:00:20 +0200 Subject: qmi_wwan: apply SET_DTR quirk to the SIMCOM shared device ID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SIMCOM are reusing a single device ID for many (all of their?) different modems, based on different chipsets and firmwares. Newer Qualcomm chipset generations require setting DTR to wake the QMI function. The SIM7600E modem is using such a chipset, making it fail to work with this driver despite the device ID match. Fix by unconditionally enabling the SET_DTR quirk for all SIMCOM modems using this specific device ID. This is similar to what we already have done for another case of device IDs recycled over multiple chipset generations: 14cf4a771b30 ("drivers: net: usb: qmi_wwan: add QMI_QUIRK_SET_DTR for Telit PID 0x1201") Initial testing on an older SIM7100 modem shows no immediate side effects. Reported-by: Sebastian Sjoholm Cc: Reinhard Speyerer Signed-off-by: Bjørn Mork Signed-off-by: David S. 
Miller --- drivers/net/usb/qmi_wwan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 42565dd33aa6..148e78f8b48c 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1248,7 +1248,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ - {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ + {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ -- cgit v1.2.3 From 846fcc83638f8908a465583fa93f4d6f14161420 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Fri, 25 May 2018 19:42:56 +0100 Subject: net: hns3: Updates RX packet info fetch in case of multi BD In the latest revision of the hardware, if a packet is spanning across multiple BDs then only VLD bit and current data size info is valid in each BD, and rest of the information is only valid in the last BD of the packet. In such case we should make sure we are fetching RX packet size from the first descriptor and information like VLAN should be fetched from last BD. Signed-off-by: Peng Li Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 36 ++++++++++++------------- 1 file changed, 18 insertions(+), 18 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index cac51954f2cf..ae8d7493c813 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2085,9 +2085,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, prefetch(desc); - length = le16_to_cpu(desc->rx.pkt_len); + length = le16_to_cpu(desc->rx.size); bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); /* Check valid BD */ if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) @@ -2121,22 +2120,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, prefetchw(skb->data); - /* Based on hw strategy, the tag offloaded will be stored at - * ot_vlan_tag in two layer tag case, and stored at vlan_tag - * in one layer tag case. - */ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - u16 vlan_tag; - - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - if (!(vlan_tag & VLAN_VID_MASK)) - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - if (vlan_tag & VLAN_VID_MASK) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); - } - bnum = 1; if (length <= HNS3_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); @@ -2172,6 +2155,23 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } *out_bnum = bnum; + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. 
+ */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(vlan_tag & VLAN_VID_MASK)) + vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + if (vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + } + + l234info = le32_to_cpu(desc->rx.l234_info); if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { netdev_err(netdev, "no valid bd,%016llx,%016llx\n", -- cgit v1.2.3 From dcb35ccef85e51cf3ad36acd08c07ec8d9ff9e2a Mon Sep 17 00:00:00 2001 From: Peng Li Date: Fri, 25 May 2018 19:42:57 +0100 Subject: net: hns3: Add support for tx_accept_tag2 and tx_accept_untag2 config HNS3 Hardware can support up to two VLAN tags in transmit leg, the PPP module can handle the packets based on the tag1 and tag2 config. This patch adds support for tag2 config for vlan handling Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 7 ++++-- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 26 +++++++++++++++++----- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 6 +++-- 3 files changed, 29 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ee3cbac6dfaa..3fa08f780b06 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -704,11 +704,14 @@ struct hclge_vlan_filter_vf_cfg_cmd { u8 vf_bitmap[16]; }; -#define HCLGE_ACCEPT_TAG_B 0 -#define HCLGE_ACCEPT_UNTAG_B 1 +#define HCLGE_ACCEPT_TAG1_B 0 +#define HCLGE_ACCEPT_UNTAG1_B 1 #define HCLGE_PORT_INS_TAG1_EN_B 2 #define HCLGE_PORT_INS_TAG2_EN_B 3 #define HCLGE_CFG_NIC_ROCE_SEL_B 4 +#define HCLGE_ACCEPT_TAG2_B 5 +#define HCLGE_ACCEPT_UNTAG2_B 6 + struct hclge_vport_vtag_tx_cfg_cmd { u8 vport_vlan_cfg; u8 vf_offset; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2f0bbb6708b9..c0b8d5a32bde 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4687,10 +4687,14 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, - vcfg->accept_tag ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, - vcfg->accept_untag ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, vcfg->insert_tag1_en ? 
1 : 0); hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, @@ -4814,8 +4818,18 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; - vport->txvlan_cfg.accept_tag = true; - vport->txvlan_cfg.accept_untag = true; + vport->txvlan_cfg.accept_tag1 = true; + vport->txvlan_cfg.accept_untag1 = true; + + /* accept_tag2 and accept_untag2 are not supported on + * pdev revision(0x20), new revision support them. The + * value of this two fields will not return error when driver + * send command to fireware in revision(0x20). + * This two fields can not configured by user. + */ + vport->txvlan_cfg.accept_tag2 = true; + vport->txvlan_cfg.accept_untag2 = true; + vport->txvlan_cfg.insert_tag1_en = false; vport->txvlan_cfg.insert_tag2_en = false; vport->txvlan_cfg.default_tag1 = 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 93177d91eea4..677f1e4521c5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -570,8 +570,10 @@ struct hclge_dev { /* VPort level vlan tag configuration for TX direction */ struct hclge_tx_vtag_cfg { - bool accept_tag; /* Whether accept tagged packet from host */ - bool accept_untag; /* Whether accept untagged packet from host */ + bool accept_tag1; /* Whether accept tag1 packet from host */ + bool accept_untag1; /* Whether accept untag1 packet from host */ + bool accept_tag2; + bool accept_untag2; bool insert_tag1_en; /* Whether insert inner vlan tag */ bool insert_tag2_en; /* Whether insert outer vlan tag */ u16 default_tag1; /* The default inner vlan tag to insert */ -- cgit v1.2.3 From 5b5455a9ed5a9d7c26313a353bc2c3c1f6e1f5cb Mon Sep 17 00:00:00 2001 From: Peng Li Date: Fri, 25 May 2018 19:42:58 +0100 Subject: net: hns3: Add STRP_TAGP field support for hardware revision 0x21 Hardware Revision(0x21) Buffer Descriptor adds a field STRP_TAGP for vlan stripped processed indication. STRP_TAGP field has 2 bits, bit 0 is stripped indication of the vlan tag in outer vlan tag field, bit 1 is stripped indication of the vlan tag in inner vlan tag field. For each bit, 0 indicates the tag is not stripped and 1 indicates the tag is stripped. This patch adds STRP_TAGP support for revision(0x21), and does not change the revision(0x20) action. Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 42 ++++++++++++++++++++++--- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 3 ++ 2 files changed, 40 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index ae8d7493c813..1bcb676f459c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2066,6 +2066,39 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) napi_gro_receive(&ring->tqp_vector->napi, skb); } +static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info) +{ + struct pci_dev *pdev = ring->tqp->handle->pdev; + u16 vlan_tag; + + if (pdev->revision == 0x20) { + vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(vlan_tag & VLAN_VID_MASK)) + vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + + return vlan_tag; + } + +#define HNS3_STRP_OUTER_VLAN 0x1 +#define HNS3_STRP_INNER_VLAN 0x2 + + switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M, + HNS3_RXD_STRP_TAGP_S)) { + case HNS3_STRP_OUTER_VLAN: + vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + break; + case HNS3_STRP_INNER_VLAN: + vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + break; + default: + vlan_tag = 0; + break; + } + + return vlan_tag; +} + static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, struct sk_buff **out_skb, int *out_bnum) { @@ -2155,6 +2188,9 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } *out_bnum = bnum; + + l234info = le32_to_cpu(desc->rx.l234_info); + /* Based on hw strategy, the tag offloaded will be stored at * ot_vlan_tag in two layer tag case, and stored at vlan_tag * in one layer tag case. @@ -2162,17 +2198,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { u16 vlan_tag; - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - if (!(vlan_tag & VLAN_VID_MASK)) - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info); if (vlan_tag & VLAN_VID_MASK) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } - l234info = le32_to_cpu(desc->rx.l234_info); - if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { netdev_err(netdev, "no valid bd,%016llx,%016llx\n", ((u64 *)desc)[0], ((u64 *)desc)[1]); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 5b40f5a53761..38e91ca71e07 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -104,6 +104,9 @@ enum hns3_nic_state { #define HNS3_RXD_L4ID_S 8 #define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) #define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_STRP_TAGP_S 13 +#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S) + #define HNS3_RXD_L2E_B 16 #define HNS3_RXD_L3E_B 17 #define HNS3_RXD_L4E_B 18 -- cgit v1.2.3 From 96c0e8614eba889d8beb00e9d74168c1c607908b Mon Sep 17 00:00:00 2001 From: Peng Li Date: Fri, 25 May 2018 19:42:59 +0100 Subject: net: hns3: Add support to enable TX/RX promisc mode for H/W rev(0x21) HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on pdev revision(0x20), new revision(0x21) supports them. Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2 ++ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 9 ++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 3fa08f780b06..e9be6aa88b65 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -484,6 +484,8 @@ struct hclge_promisc_param { u8 enable; }; +#define HCLGE_PROMISC_TX_EN_B BIT(4) +#define HCLGE_PROMISC_RX_EN_B BIT(5) #define HCLGE_PROMISC_EN_B 1 #define HCLGE_PROMISC_EN_ALL 0x7 #define HCLGE_PROMISC_EN_UC 0x1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c0b8d5a32bde..8ad8f62dc501 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3586,7 +3586,14 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, req = (struct hclge_promisc_cfg_cmd *)desc.data; req->vf_id = param->vf_id; - req->flag = (param->enable << HCLGE_PROMISC_EN_B); + + /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on + * pdev revision(0x20), new revision support them. The + * value of this two fields will not return error when driver + * send command to fireware in revision(0x20). + */ + req->flag = (param->enable << HCLGE_PROMISC_EN_B) | + HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { -- cgit v1.2.3 From 7c4bfcb0556bc8e1cd86b25db0a5058b49f5a085 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Fri, 25 May 2018 19:43:00 +0100 Subject: net: hns3: Fix for PF mailbox receving unknown message Before the firmware updates the crq's tail pointer, if the PF driver reads the data in the crq, the data may be incomplete at this time, which will lead to the driver read an unknown message. This patch fixes it by checking if crq is not empty before reading the message. Fixes: c1a81619d73a ("net: hns3: Add mailbox interrupt handling to PF driver") Signed-off-by: Xi Wang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 23 +++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index b6ae26ba0a46..31f3d9a43d8d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -382,6 +382,13 @@ static void hclge_reset_vf(struct hclge_vport *vport, hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid); } +static bool hclge_cmd_crq_empty(struct hclge_hw *hw) +{ + u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG); + + return tail == hw->cmq.crq.next_to_use; +} + void hclge_mbx_handler(struct hclge_dev *hdev) { struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; @@ -390,12 +397,23 @@ void hclge_mbx_handler(struct hclge_dev *hdev) struct hclge_desc *desc; int ret, flag; - flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); /* handle all the mailbox requests in the queue */ - while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) { + while (!hclge_cmd_crq_empty(&hdev->hw)) { desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + dev_warn(&hdev->pdev->dev, + "dropped invalid mailbox message, code = %d\n", + req->msg[0]); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + continue; + } + vport = &hdev->vport[req->mbx_src_vfid]; switch (req->msg[0]) { @@ -470,7 +488,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev) } crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); - flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ -- cgit v1.2.3 From 90b99b094b4bd270542e3ade8cf74e21c228c069 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Fri, 25 May 2018 19:43:01 +0100 Subject: net: hns3: Fixes the state to indicate client-type initialization HNAE3 module supports kernel nic driver, user nic driver and roce driver, and there are 3 client types. Driver uses one bit(HNAE3_CLIENT_INITED_B) to indicate the client initialization state, it will cause confusion for 3 client types. This patch fixes it by use 3 bits to indicate the initialization state. Fixes: 38caee9d3ee8 ("net: hns3: Add support of the HNAE3 framework") Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 49 +++++++++++++++++++++++++++-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 4 ++- 2 files changed, 49 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 63d7dbfb90bf..9d79dad2c6aa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -36,6 +36,49 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, return false; } +static void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, int inited) +{ + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); + break; + case HNAE3_CLIENT_UNIC: + hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); + break; + case HNAE3_CLIENT_ROCE: + hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); + break; + default: + break; + } +} + +static int hnae3_get_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + int inited = 0; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + inited = hnae_get_bit(ae_dev->flag, + HNAE3_KNIC_CLIENT_INITED_B); + break; + case HNAE3_CLIENT_UNIC: + inited = hnae_get_bit(ae_dev->flag, + HNAE3_UNIC_CLIENT_INITED_B); + break; + case HNAE3_CLIENT_ROCE: + inited = hnae_get_bit(ae_dev->flag, + HNAE3_ROCE_CLIENT_INITED_B); + break; + default: + break; + } + + return inited; +} + static int hnae3_match_n_instantiate(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev, bool is_reg) { @@ -56,14 +99,14 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, return ret; } - hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 1); + hnae3_set_client_init_flag(client, ae_dev, 1); return 0; } - if (hnae_get_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B)) { + if (hnae3_get_client_init_flag(client, ae_dev)) { ae_dev->ops->uninit_client_instance(client, ae_dev); - hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 0); + hnae3_set_client_init_flag(client, ae_dev, 0); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 45c571eea2ae..f250c592a218 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -54,7 +54,9 @@ #define HNAE3_DEV_INITED_B 0x0 #define HNAE3_DEV_SUPPORT_ROCE_B 0x1 #define HNAE3_DEV_SUPPORT_DCB_B 0x2 -#define HNAE3_CLIENT_INITED_B 0x3 +#define HNAE3_KNIC_CLIENT_INITED_B 0x3 +#define HNAE3_UNIC_CLIENT_INITED_B 0x4 +#define HNAE3_ROCE_CLIENT_INITED_B 0x5 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) -- cgit v1.2.3 From 7d0b130cbbfa4651cc1ab9268a2956c1b9d82ff9 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Fri, 25 May 2018 19:43:02 +0100 Subject: net: hns3: Fixes the init of the VALID BD info in the descriptor RX Buffer Descriptor contains a VALID bit which indicates if the BD is valid and has some data. This field is set by HNS3 hardware to inform the driver that valid data is present in the BD, and should be reset by the driver when the BD is being used again. In the existing code this bit was not being (re-)initialized properly and hence was causing problems. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1bcb676f459c..d1ef104d6478 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1819,6 +1819,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) @@ -1826,6 +1827,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) ring->desc_cb[i].reuse_flag = 0; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + ring->desc_cb[i].page_offset); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, -- cgit v1.2.3 From 6986df9686c55464b0fa0f36a02f4da81d99a3b1 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Fri, 25 May 2018 19:43:03 +0100 Subject: net: hns3: Removes unnecessary check when clearing TX/RX rings Our code will ensure that hns3_clear_tx_ring is not used to clear RX rings and hns3_clear_rx_ring is not used to clear TX rings. So the ring type check is unnecessary. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index d1ef104d6478..4c13c527d3e9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3217,9 +3217,6 @@ static void hns3_recover_hw_addr(struct net_device *ndev) static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { - if (!HNAE3_IS_TX_RING(ring)) - return; - while (ring->next_to_clean != ring->next_to_use) { hns3_free_buffer_detach(ring, ring->next_to_clean); ring_ptr_move_fw(ring, next_to_clean); @@ -3228,9 +3225,6 @@ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) static void hns3_clear_rx_ring(struct hns3_enet_ring *ring) { - if (HNAE3_IS_TX_RING(ring)) - return; - while (ring->next_to_use != ring->next_to_clean) { /* When a buffer is not reused, it's memory has been * freed in hns3_handle_rx_bd or will be freed by -- cgit v1.2.3 From 7b763f3f7e52b1e0284c9f42acc4229935b7ec5f Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Fri, 25 May 2018 19:43:04 +0100 Subject: net: hns3: Clear TX/RX rings when stopping port & un-initializing client When we bring the port down, some packets are left in the TX/RX buffers. When we bring the port up again, these stale packets are forwarded to the protocol stack or sent out to the network, which can cause problems. The TX/RX buffers should be cleared when stopping the port. This patch adds functions to ensure the buffers are clean when the port is started. We should clear the rings when clients are being un-initialized as well. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 115 +++++++++++++++++++-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 1 + drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 4 + 3 files changed, 110 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 4c13c527d3e9..05290129793f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -25,6 +25,9 @@ #include "hnae3.h" #include "hns3_enet.h" +static void hns3_clear_all_ring(struct hnae3_handle *h); +static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); + static const char hns3_driver_name[] = "hns3"; const char hns3_driver_version[] = VERMAGIC_STRING; static const char hns3_driver_string[] = @@ -273,6 +276,10 @@ static int hns3_nic_net_up(struct net_device *netdev) int i, j; int ret; + ret = hns3_nic_reset_all_ring(h); + if (ret) + return ret; + /* get irq resource for all vectors */ ret = hns3_nic_init_irq(priv); if (ret) { @@ -333,17 +340,19 @@ static void hns3_nic_net_down(struct net_device *netdev) if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) return; + /* disable vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; if (ops->stop) ops->stop(priv->ae_handle); - /* disable vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - /* free irq resources */ hns3_nic_uninit_irq(priv); + + hns3_clear_all_ring(priv->ae_handle); } static int hns3_nic_net_stop(struct net_device *netdev) @@ -2939,8 +2948,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) goto out_when_alloc_ring_memory; } - hns3_init_ring_hw(priv->ring_data[i].ring); - u64_stats_init(&priv->ring_data[i].ring->syncp); } @@ -3102,6 +3109,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); + hns3_force_clear_all_rx_ring(handle); + ret = hns3_nic_uninit_vector_data(priv); if (ret) netdev_err(netdev, "uninit vector error\n"); @@ -3218,12 +3227,46 @@ static void hns3_recover_hw_addr(struct net_device *ndev) static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { + ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; hns3_free_buffer_detach(ring, ring->next_to_clean); ring_ptr_move_fw(ring, next_to_clean); } } -static void hns3_clear_rx_ring(struct hns3_enet_ring *ring) +static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) +{ + struct hns3_desc_cb res_cbs; + int ret; + + while (ring->next_to_use != ring->next_to_clean) { + /* When a buffer is not reused, it's memory has been + * freed in hns3_handle_rx_bd or will be freed by + * stack, so we need to replace the buffer here. + */ + if (!ring->desc_cb[ring->next_to_use].reuse_flag) { + ret = hns3_reserve_buffer_map(ring, &res_cbs); + if (ret) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + /* if alloc new buffer fail, exit directly + * and reclear in up flow. 
+ */ + netdev_warn(ring->tqp->handle->kinfo.netdev, + "reserve buffer map failed, ret = %d\n", + ret); + return ret; + } + hns3_replace_buffer(ring, ring->next_to_use, + &res_cbs); + } + ring_ptr_move_fw(ring, next_to_use); + } + + return 0; +} + +static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_use != ring->next_to_clean) { /* When a buffer is not reused, it's memory has been @@ -3240,6 +3283,19 @@ static void hns3_clear_rx_ring(struct hns3_enet_ring *ring) } } +static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *ring; + u32 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + hns3_force_clear_rx_ring(ring); + } +} + static void hns3_clear_all_ring(struct hnae3_handle *h) { struct net_device *ndev = h->kinfo.netdev; @@ -3257,10 +3313,51 @@ static void hns3_clear_all_ring(struct hnae3_handle *h) netdev_tx_reset_queue(dev_queue); ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + /* Continue to clear other rings even if clearing some + * rings failed. + */ hns3_clear_rx_ring(ring); } } +int hns3_nic_reset_all_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *rx_ring; + int i, j; + int ret; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + h->ae_algo->ops->reset_queue(h, i); + hns3_init_ring_hw(priv->ring_data[i].ring); + + /* We need to clear tx ring here because self test will + * use the ring and will not run down before up + */ + hns3_clear_tx_ring(priv->ring_data[i].ring); + priv->ring_data[i].ring->next_to_clean = 0; + priv->ring_data[i].ring->next_to_use = 0; + + rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + hns3_init_ring_hw(rx_ring); + ret = hns3_clear_rx_ring(rx_ring); + if (ret) + return ret; + + /* We can not know the hardware head and tail when this + * function is called in reset flow, so we reuse all desc. 
+ */ + for (j = 0; j < rx_ring->desc_num; j++) + hns3_reuse_buffer(rx_ring, j); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + } + + return 0; +} + static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -3330,7 +3427,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_clear_all_ring(handle); + hns3_force_clear_all_rx_ring(handle); ret = hns3_nic_uninit_vector_data(priv); if (ret) { @@ -3466,8 +3563,6 @@ int hns3_set_channels(struct net_device *netdev, if (if_running) hns3_nic_net_stop(netdev); - hns3_clear_all_ring(h); - ret = hns3_nic_uninit_vector_data(priv); if (ret) { dev_err(&netdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 38e91ca71e07..3b083d5ae9ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -625,6 +625,7 @@ int hns3_set_channels(struct net_device *netdev, bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); +int hns3_nic_reset_all_ring(struct hnae3_handle *h); netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); int hns3_clean_rx_ring( struct hns3_enet_ring *ring, int budget, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index c16bb6cb0564..8f8cc2413656 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -108,6 +108,10 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) if (!h->ae_algo->ops->start) return -EOPNOTSUPP; + ret = hns3_nic_reset_all_ring(h); + if (ret) + return ret; + ret = h->ae_algo->ops->start(h); if (ret) { netdev_err(ndev, -- cgit v1.2.3 From 16b496fd9063a8e00e79438bab50ff45f071be6a Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Fri, 25 May 2018 19:43:05 +0100 Subject: net: hns3: Remove unused led control code The previous implementation of led control for fibre ports parsed the port speed configuration, checked the link status and traffic status every second, and updated the blink status of the link led, traffic led and speed led. Now the firmware takes responsibility for handling the leds, and the driver only needs to deal with the locate command. So the code for the link led, traffic led and speed led is useless now. This patch removes this redundant code. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S.
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 1 - .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 109 --------------------- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 - 3 files changed, 112 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index e9be6aa88b65..de2f6f118859 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -115,7 +115,6 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, - HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314, /* MACSEC command */ /* PFC/Pause CMD*/ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8ad8f62dc501..20988aadae21 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -39,7 +39,6 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); -static int hclge_update_led_status(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; @@ -504,38 +503,6 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev) return 0; } -static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev) -{ - struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats; - struct hclge_desc desc; - __le64 *desc_data; - int ret; - - /* for fiber port, need to query the total rx/tx packets statstics, - * used for data transferring checking. - */ - if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) - return 0; - - if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) - return 0; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get MAC total pkt stats fail, ret = %d\n", ret); - - return ret; - } - - desc_data = (__le64 *)(&desc.data[0]); - mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++); - mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data); - - return 0; -} - static int hclge_mac_update_stats(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -2916,20 +2883,13 @@ static void hclge_service_task(struct work_struct *work) struct hclge_dev *hdev = container_of(work, struct hclge_dev, service_task); - /* The total rx/tx packets statstics are wanted to be updated - * per second. Both hclge_update_stats_for_all() and - * hclge_mac_get_traffic_stats() can do it. 
- */ if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { hclge_update_stats_for_all(hdev); hdev->hw_stats.stats_timer = 0; - } else { - hclge_mac_get_traffic_stats(hdev); } hclge_update_speed_duplex(hdev); hclge_update_link_status(hdev); - hclge_update_led_status(hdev); hclge_service_complete(hdev); } @@ -6100,75 +6060,6 @@ static int hclge_set_led_id(struct hnae3_handle *handle, return ret; } -enum hclge_led_port_speed { - HCLGE_SPEED_LED_FOR_1G, - HCLGE_SPEED_LED_FOR_10G, - HCLGE_SPEED_LED_FOR_25G, - HCLGE_SPEED_LED_FOR_40G, - HCLGE_SPEED_LED_FOR_50G, - HCLGE_SPEED_LED_FOR_100G, -}; - -static u8 hclge_led_get_speed_status(u32 speed) -{ - u8 speed_led; - - switch (speed) { - case HCLGE_MAC_SPEED_1G: - speed_led = HCLGE_SPEED_LED_FOR_1G; - break; - case HCLGE_MAC_SPEED_10G: - speed_led = HCLGE_SPEED_LED_FOR_10G; - break; - case HCLGE_MAC_SPEED_25G: - speed_led = HCLGE_SPEED_LED_FOR_25G; - break; - case HCLGE_MAC_SPEED_40G: - speed_led = HCLGE_SPEED_LED_FOR_40G; - break; - case HCLGE_MAC_SPEED_50G: - speed_led = HCLGE_SPEED_LED_FOR_50G; - break; - case HCLGE_MAC_SPEED_100G: - speed_led = HCLGE_SPEED_LED_FOR_100G; - break; - default: - speed_led = HCLGE_LED_NO_CHANGE; - } - - return speed_led; -} - -static int hclge_update_led_status(struct hclge_dev *hdev) -{ - u8 port_speed_status, link_status, activity_status; - u64 rx_pkts, tx_pkts; - - if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) - return 0; - - port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed); - - rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num; - tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num; - if (rx_pkts != hdev->rx_pkts_for_led || - tx_pkts != hdev->tx_pkts_for_led) - activity_status = HCLGE_LED_ON; - else - activity_status = HCLGE_LED_OFF; - hdev->rx_pkts_for_led = rx_pkts; - hdev->tx_pkts_for_led = tx_pkts; - - if (hdev->hw.mac.link) - link_status = HCLGE_LED_ON; - else - link_status = HCLGE_LED_OFF; - - return hclge_set_led_status_sfp(hdev, port_speed_status, - activity_status, link_status, - HCLGE_LED_NO_CHANGE); -} - static void hclge_get_link_mode(struct hnae3_handle *handle, unsigned long *supported, unsigned long *advertising) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 677f1e4521c5..7fcabdeb4ce9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -563,8 +563,6 @@ struct hclge_dev { struct hclge_vlan_type_cfg vlan_type_cfg; - u64 rx_pkts_for_led; - u64 tx_pkts_for_led; unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; }; -- cgit v1.2.3 From f6f75abc46db2002f28705a2fd4867e895169807 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Fri, 25 May 2018 19:43:06 +0100 Subject: net: hns3: Adds support for led locate command for copper port Firmware now supports control of all leds. Existing HNS3 driver code only supported led locate command over SFP Fibre ports. But now it is also supported over copper port. This patch removes existing not needed code for the led locate command and updates the led control command between driver and firmware. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 12 ++------ .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 35 +++------------------- 2 files changed, 6 insertions(+), 41 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index de2f6f118859..d9aaa76c76eb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -817,21 +817,13 @@ struct hclge_reset_cmd { #define HCLGE_NIC_CMQ_DESC_NUM 1024 #define HCLGE_NIC_CMQ_DESC_NUM_S 3 -#define HCLGE_LED_PORT_SPEED_STATE_S 0 -#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0) -#define HCLGE_LED_ACTIVITY_STATE_S 0 -#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0) -#define HCLGE_LED_LINK_STATE_S 0 -#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0) #define HCLGE_LED_LOCATE_STATE_S 0 #define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0) struct hclge_set_led_state_cmd { - u8 port_speed_led_config; - u8 link_led_config; - u8 activity_led_config; + u8 rsv1[3]; u8 locate_led_config; - u8 rsv[20]; + u8 rsv2[20]; }; int hclge_cmd_init(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 20988aadae21..69166858a6dc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5991,9 +5991,7 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, "Get 64 bit register failed, ret = %d.\n", ret); } -static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status, - u8 act_led_status, u8 link_led_status, - u8 locate_led_status) +static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) { struct hclge_set_led_state_cmd *req; struct hclge_desc desc; @@ -6002,12 +6000,6 @@ static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); req = (struct hclge_set_led_state_cmd *)desc.data; - hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M, - HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status); - hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M, - HCLGE_LED_ACTIVITY_STATE_S, act_led_status); - hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M, - HCLGE_LED_LINK_STATE_S, link_led_status); hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, HCLGE_LED_LOCATE_STATE_S, locate_led_status); @@ -6028,36 +6020,17 @@ enum hclge_led_status { static int hclge_set_led_id(struct hnae3_handle *handle, enum ethtool_phys_id_state status) { -#define BLINK_FREQUENCY 2 struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; - int ret = 0; - - if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) - return -EOPNOTSUPP; switch (status) { case ETHTOOL_ID_ACTIVE: - ret = hclge_set_led_status_sfp(hdev, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_ON); - break; + return hclge_set_led_status(hdev, HCLGE_LED_ON); case ETHTOOL_ID_INACTIVE: - ret = hclge_set_led_status_sfp(hdev, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_OFF); - break; + return hclge_set_led_status(hdev, HCLGE_LED_OFF); default: - ret = -EINVAL; - break; + return -EINVAL; } - - return ret; } static void 
hclge_get_link_mode(struct hnae3_handle *handle, unsigned long *supported, unsigned long *advertising) -- cgit v1.2.3 From 544a7bcd5cc74734003cf7f12502b26cebfed19f Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Fri, 25 May 2018 19:43:07 +0100 Subject: net: hns3: Fixes initialization of RoCE handle and makes it conditional When registering a RoCE client with an hnae3vf device, the driver needs to check whether the device supports the RoCE VF function. Otherwise, it will lead to a calltrace when RoCE does not support the VF function and the roce device is removed. The calltrace is as follows: [ 93.156614] Unable to handle kernel NULL pointer dereference at virtual address 00000015 [ 93.278784] Call trace: [ 93.278788] hnae3_match_n_instantiate+0x24/0xd8 [hnae3] [ 93.278790] hnae3_register_client+0xcc/0x150 [hnae3] [ 93.278801] hns_roce_hw_v2_init+0x18/0x1000 [hns_roce_hw_v2] [ 93.278805] do_one_initcall+0x58/0x160 [ 93.278807] do_init_module+0x64/0x1d8 [ 93.278809] load_module+0x135c/0x15c8 [ 93.278811] SyS_finit_module+0x100/0x118 [ 93.278816] __sys_trace_return+0x0/0x4 [ 93.278827] Code: aa0003f5 12001c56 aa1e03e0 d503201f (b9402660) Fixes: e2cb1dec9779 ("net: hns3: Add HNS3 VF HCL(Hardware Compatibility Layer) Support") Reported-by: Xinwei Kong Reported-by: Zhou Wang Signed-off-by: Lijun Ou Signed-off-by: Zhou Wang Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 2b0e3295989f..266cdcba506e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1500,10 +1500,12 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev, return ret; break; case HNAE3_CLIENT_ROCE: - hdev->roce_client = client; - hdev->roce.client = client; + if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_client = client; + hdev->roce.client = client; + } - if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { + if (hdev->roce_client && hdev->nic_client) { ret = hclgevf_init_roce_base_info(hdev); if (ret) return ret; -- cgit v1.2.3 From 728a9dc61f132eb567f58c234e11ef80a3519cc0 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 17 May 2018 11:29:50 -0700 Subject: wlcore: sdio: Fix flaky SDIO runtime PM handling We can have pm_runtime_get_sync() return 1, and we can have pm_runtime_put_sync() return -EBUSY. See rpm_suspend() and rpm_resume() for more information. Fix the issue by returning 0 from wl12xx_sdio_power_on() on success. And use pm_runtime_put() instead of pm_runtime_put_sync() for wl12xx_sdio_power_off(), then the MMC subsystem will idle the bus when suitable. Otherwise wlcore can sometimes get confused and may report bogus errors and WLAN connection can fail. Note that while wlcore checks the return value for wl1271_power_on(), the return value is ignored for wl1271_power_off(). Let's fix them both though to avoid further confusion in the future.
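For illustration, the return-value subtlety described above can be reduced to a minimal, self-contained C sketch (not the wlcore or pm_runtime code; fake_runtime_get_sync() and fake_runtime_put() are made-up stand-ins): a get-style call may legitimately return a positive value on success, so the power-on path normalizes it to 0, and a put-style call returning -EBUSY is tolerated rather than treated as a failure.

#include <stdio.h>
#include <errno.h>

/* Stand-ins: positive return means "already active / resumed",
 * negative return means a real error. */
static int fake_runtime_get_sync(void) { return 1; }
static int fake_runtime_put(void) { return -EBUSY; }

static int power_on(void)
{
	int ret = fake_runtime_get_sync();

	if (ret < 0)
		return ret;	/* real failure: propagate */

	return 0;		/* >= 0 means powered on: normalize to 0 */
}

static int power_off(void)
{
	int error = fake_runtime_put();

	if (error < 0 && error != -EBUSY)
		return error;	/* -EBUSY only means "still busy", not a failure */

	return 0;
}

int main(void)
{
	printf("power_on:  %d\n", power_on());	/* 0, even though get returned 1 */
	printf("power_off: %d\n", power_off());	/* 0, -EBUSY is tolerated */
	return 0;
}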
Fixes: 60f36637bbbd ("wlcore: sdio: allow pm to handle sdio power") Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/sdio.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 6dbe61d47dc3..15d5ac126061 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -159,28 +159,36 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) pm_runtime_put_noidle(&card->dev); dev_err(glue->dev, "%s: failed to get_sync(%d)\n", __func__, ret); - goto out; + + return ret; } sdio_claim_host(func); sdio_enable_func(func); sdio_release_host(func); -out: - return ret; + return 0; } static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) { struct sdio_func *func = dev_to_sdio_func(glue->dev); struct mmc_card *card = func->card; + int error; sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); /* Let runtime PM know the card is powered off */ - return pm_runtime_put_sync(&card->dev); + error = pm_runtime_put(&card->dev); + if (error < 0 && error != -EBUSY) { + dev_err(&card->dev, "%s failed: %i\n", __func__, error); + + return error; + } + + return 0; } static int wl12xx_sdio_set_power(struct device *child, bool enable) -- cgit v1.2.3 From 6acfbb81ab0a800fd0ce0795103852255d671635 Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Fri, 18 May 2018 17:29:54 +0800 Subject: rtlwifi: support accurate nullfunc frame tx ack report In order to realize the keep-alive mechanism in mac80211 stack, reporting accurate tx ack status for nullfunc frame is added in this commit. If current frame is nullfunc frame, we ask firmware to report by filling TX report bit in TX descriptor. After this frame DMA done, TX interrupt is triggered but TX status is unknown at this moment, so enqueue this skb into tx_report->queue. Finally, C2H report will be received if the frame is transmitted successfully or retried over, and then we report to mac80211 with IEEE80211_TX_STAT_ACK flag only if it's successful. Otherwise, if failure or timeout (one second), we report to mac80211 without this flag. 
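The flow just described amounts to a small piece of per-frame bookkeeping: remember each frame that requested a firmware TX report together with its report sequence number and send time, complete the matching entry when the C2H report arrives, and flush anything that never gets a report. A minimal, self-contained C sketch of that pattern (illustrative only, not the rtlwifi code; all names here are made up) is shown below.

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define MAX_PENDING	8
#define ACK_TIMEOUT_SEC	1

struct pending_frame {
	bool used;
	unsigned int sn;	/* sequence number written into the TX descriptor */
	time_t send_time;	/* when the frame was handed to hardware */
};

static struct pending_frame pending[MAX_PENDING];

/* Remember a frame that asked the firmware for a TX report. */
static void remember_frame(unsigned int sn)
{
	for (int i = 0; i < MAX_PENDING; i++) {
		if (!pending[i].used) {
			pending[i].used = true;
			pending[i].sn = sn;
			pending[i].send_time = time(NULL);
			return;
		}
	}
}

/* Firmware report arrived: match the entry by sn and complete it. */
static void handle_tx_report(unsigned int sn, bool acked)
{
	for (int i = 0; i < MAX_PENDING; i++) {
		if (pending[i].used && pending[i].sn == sn) {
			pending[i].used = false;
			printf("sn %u: reported %s\n", sn, acked ? "ack" : "no ack");
			return;
		}
	}
}

/* Periodic (watchdog-style) pass: anything older than the timeout is
 * completed as "no ack" so the entry is never leaked. */
static void flush_stale(void)
{
	time_t now = time(NULL);

	for (int i = 0; i < MAX_PENDING; i++) {
		if (pending[i].used &&
		    now - pending[i].send_time >= ACK_TIMEOUT_SEC) {
			pending[i].used = false;
			printf("sn %u: timed out, no ack\n", pending[i].sn);
		}
	}
}

int main(void)
{
	remember_frame(1);
	remember_frame(2);
	handle_tx_report(1, true);	/* sn 1 acked by firmware */
	flush_stale();			/* sn 2 is still young here; a later pass would flush it */
	return 0;
}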
Signed-off-by: Tzu-En Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 84 +++++++++++++++++++--- drivers/net/wireless/realtek/rtlwifi/base.h | 5 +- drivers/net/wireless/realtek/rtlwifi/pci.c | 13 ++-- .../net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 5 +- .../net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 5 +- .../net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 5 +- drivers/net/wireless/realtek/rtlwifi/wifi.h | 16 +++++ 7 files changed, 112 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 762a29cdf7ad..6620c6842b91 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -574,6 +574,7 @@ int rtl_init_core(struct ieee80211_hw *hw) INIT_LIST_HEAD(&rtlpriv->entry_list); INIT_LIST_HEAD(&rtlpriv->c2hcmd_list); INIT_LIST_HEAD(&rtlpriv->scan_list.list); + skb_queue_head_init(&rtlpriv->tx_report.queue); rtlmac->link_state = MAC80211_NOLINK; @@ -585,11 +586,14 @@ int rtl_init_core(struct ieee80211_hw *hw) EXPORT_SYMBOL_GPL(rtl_init_core); static void rtl_free_entries_from_scan_list(struct ieee80211_hw *hw); +static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw, + bool timeout); void rtl_deinit_core(struct ieee80211_hw *hw) { rtl_c2hcmd_launcher(hw, 0); rtl_free_entries_from_scan_list(hw); + rtl_free_entries_from_ack_queue(hw, false); } EXPORT_SYMBOL_GPL(rtl_deinit_core); @@ -1575,22 +1579,52 @@ end: } EXPORT_SYMBOL_GPL(rtl_is_special_data); +void rtl_tx_ackqueue(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tx_report *tx_report = &rtlpriv->tx_report; + + __skb_queue_tail(&tx_report->queue, skb); +} +EXPORT_SYMBOL_GPL(rtl_tx_ackqueue); + +static void rtl_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, + bool ack) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ieee80211_tx_info *info; + + info = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(info); + if (ack) { + RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD, + "tx report: ack\n"); + info->flags |= IEEE80211_TX_STAT_ACK; + } else { + RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD, + "tx report: not ack\n"); + info->flags &= ~IEEE80211_TX_STAT_ACK; + } + ieee80211_tx_status_irqsafe(hw, skb); +} + bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb) { u16 ether_type; const u8 *ether_type_ptr; + __le16 fc = rtl_get_fc(skb); ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, true); ether_type = be16_to_cpup((__be16 *)ether_type_ptr); - /* EAPOL */ - if (ether_type == ETH_P_PAE) + if (ether_type == ETH_P_PAE || ieee80211_is_nullfunc(fc)) return true; return false; } -static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw) +static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw, + struct rtlwifi_tx_info *tx_info) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_tx_report *tx_report = &rtlpriv->tx_report; @@ -1604,29 +1638,33 @@ static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw) tx_report->last_sent_sn = sn; tx_report->last_sent_time = jiffies; - + tx_info->sn = sn; + tx_info->send_time = tx_report->last_sent_time; RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG, "Send TX-Report sn=0x%X\n", sn); return sn; } -void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc, - struct ieee80211_hw *hw) +void rtl_set_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc, + struct ieee80211_hw *hw, struct 
rtlwifi_tx_info *tx_info) { if (ptcb_desc->use_spe_rpt) { - u16 sn = rtl_get_tx_report_sn(hw); + u16 sn = rtl_get_tx_report_sn(hw, tx_info); SET_TX_DESC_SPE_RPT(pdesc, 1); SET_TX_DESC_SW_DEFINE(pdesc, sn); } } -EXPORT_SYMBOL_GPL(rtl_get_tx_report); +EXPORT_SYMBOL_GPL(rtl_set_tx_report); void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_tx_report *tx_report = &rtlpriv->tx_report; + struct rtlwifi_tx_info *tx_info; + struct sk_buff_head *queue = &tx_report->queue; + struct sk_buff *skb; u16 sn; u8 st, retry; @@ -1642,6 +1680,14 @@ void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len) tx_report->last_recv_sn = sn; + skb_queue_walk(queue, skb) { + tx_info = rtl_tx_skb_cb_info(skb); + if (tx_info->sn == sn) { + skb_unlink(skb, queue); + rtl_tx_status(hw, skb, st == 0); + break; + } + } RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG, "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n", st, sn, retry); @@ -1909,6 +1955,25 @@ static void rtl_free_entries_from_scan_list(struct ieee80211_hw *hw) } } +static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw, + bool chk_timeout) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tx_report *tx_report = &rtlpriv->tx_report; + struct sk_buff_head *queue = &tx_report->queue; + struct sk_buff *skb, *tmp; + struct rtlwifi_tx_info *tx_info; + + skb_queue_walk_safe(queue, skb, tmp) { + tx_info = rtl_tx_skb_cb_info(skb); + if (chk_timeout && + time_after(tx_info->send_time + HZ, jiffies)) + continue; + skb_unlink(skb, queue); + rtl_tx_status(hw, skb, false); + } +} + void rtl_scan_list_expire(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -2173,6 +2238,9 @@ label_lps_done: /* <6> scan list */ rtl_scan_list_expire(hw); + + /* <7> check ack queue */ + rtl_free_entries_from_ack_queue(hw, true); } void rtl_watch_dog_timer_callback(struct timer_list *t) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index acc924635818..19e7477839e4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -130,9 +130,10 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx, bool is_enc); +void rtl_tx_ackqueue(struct ieee80211_hw *hw, struct sk_buff *skb); bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb); -void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc, - struct ieee80211_hw *hw); +void rtl_set_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc, + struct ieee80211_hw *hw, struct rtlwifi_tx_info *info); void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len); bool rtl_check_tx_report_acked(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 57bb8f049e59..d0c509ef790e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -619,12 +619,15 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) rtlpriv->link_info.tidtx_inperiod[tid]++; info = IEEE80211_SKB_CB(skb); - ieee80211_tx_info_clear_status(info); - info->flags |= IEEE80211_TX_STAT_ACK; - /*info->status.rates[0].count = 1; */ - - ieee80211_tx_status_irqsafe(hw, skb); + if (likely(!ieee80211_is_nullfunc(fc))) { + ieee80211_tx_info_clear_status(info); + info->flags |= 
IEEE80211_TX_STAT_ACK; + /*info->status.rates[0].count = 1; */ + ieee80211_tx_status_irqsafe(hw, skb); + } else { + rtl_tx_ackqueue(hw, skb); + } if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) { RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 4f7444331b07..852a2701ef55 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -662,6 +662,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb); u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; @@ -723,8 +724,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); } - /* tx report */ - rtl_get_tx_report(ptcb_desc, pdesc, hw); SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); @@ -827,6 +826,8 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_HTC(pdesc, 1); } } + /* tx report */ + rtl_set_tx_report(ptcb_desc, pdesc, hw, tx_info); } SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index fd9b38aa08a1..deb8f9501b51 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -431,6 +431,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb); u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; @@ -488,8 +489,6 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); } - /* tx report */ - rtl_get_tx_report(ptcb_desc, pdesc, hw); /* ptcb_desc->use_driver_rate = true; */ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); @@ -578,6 +577,8 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_HTC(pdesc, 1); } } + /* tx report */ + rtl_set_tx_report(ptcb_desc, pdesc, hw, tx_info); } SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 
1 : 0)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 1e1bacf562f3..63fb80039f82 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -691,6 +691,7 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb); u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; @@ -740,8 +741,6 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); } - /* tx report */ - rtl_get_tx_report(ptcb_desc, pdesc, hw); /* ptcb_desc->use_driver_rate = true; */ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); @@ -818,6 +817,8 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_HTC(pdesc, 1); } } + /* tx report */ + rtl_set_tx_report(ptcb_desc, pdesc, hw, tx_info); } SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 208010fcde21..1259d2b66d17 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1010,6 +1010,21 @@ enum dm_info_query { DM_INFO_SIZE, }; +struct rtlwifi_tx_info { + int sn; + unsigned long send_time; +}; + +static inline struct rtlwifi_tx_info *rtl_tx_skb_cb_info(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + BUILD_BUG_ON(sizeof(struct rtlwifi_tx_info) > + sizeof(info->status.status_driver_data)); + + return (struct rtlwifi_tx_info *)(info->status.status_driver_data); +} + struct octet_string { u8 *octet; u16 length; @@ -1967,6 +1982,7 @@ struct rtl_tx_report { u16 last_sent_sn; unsigned long last_sent_time; u16 last_recv_sn; + struct sk_buff_head queue; }; struct rtl_ps_ctl { -- cgit v1.2.3 From faa2800653978d67ef6ee2121334d66271ba6789 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:29:55 +0800 Subject: rtlwifi: remove CONNECTION_MONITOR flag To use keep-alive mechanism in mac80211 stack, since driver supports reporting accurate nullfunc frame tx ack now. 
Signed-off-by: Tzu-En Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 6620c6842b91..759a802ccbee 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -396,7 +396,6 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); - ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); -- cgit v1.2.3 From c4528686d0ea222d0b29b4017ae227546bdbb7d7 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:29:56 +0800 Subject: rtlwifi: remove duplicate rx_packet_type definition Move duplicate definitions from def.h of ic folder to wifi.h Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h | 7 ------- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h | 8 -------- drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h | 8 -------- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h | 8 -------- drivers/net/wireless/realtek/rtlwifi/wifi.h | 8 ++++++++ 5 files changed, 8 insertions(+), 31 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h index 0532b9852444..32492d64d685 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h @@ -137,13 +137,6 @@ enum version_8188e { VERSION_UNKNOWN = 0xFF, }; -enum rx_packet_type { - NORMAL_RX, - TX_REPORT1, - TX_REPORT2, - HIS_REPORT, -}; - enum rtl819x_loopback_e { RTL819X_NO_LOOPBACK = 0, RTL819X_MAC_LOOPBACK = 1, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h index 60f5728b4e2d..9f7e7bb8610b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h @@ -47,14 +47,6 @@ enum version_8192e { VERSION_UNKNOWN = 0xFF, }; -enum rx_packet_type { - NORMAL_RX, - TX_REPORT1, - TX_REPORT2, - HIS_REPORT, - C2H_PACKET, -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h index 025ea5c0f3f6..5e5403d69220 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h @@ -38,14 +38,6 @@ /* Currently only for RTL8723B */ #define EXT_VENDOR_ID (BIT(18) | BIT(19)) -enum rx_packet_type { - NORMAL_RX, - TX_REPORT1, - TX_REPORT2, - HIS_REPORT, - C2H_PACKET, -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h index dfbdf539de1a..498f716bfc73 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h @@ -332,14 +332,6 @@ enum rtl_desc_qsel { QSLT_CMD = 0x13, }; -enum rx_packet_type { - NORMAL_RX, - TX_REPORT1, - TX_REPORT2, - HIS_REPORT, - C2H_PACKET, -}; - struct phy_sts_cck_8821ae_t { u8 adc_pwdb_X[4]; u8 sq_rpt; diff --git 
a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 1259d2b66d17..81ac036760fc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1010,6 +1010,14 @@ enum dm_info_query { DM_INFO_SIZE, }; +enum rx_packet_type { + NORMAL_RX, + TX_REPORT1, + TX_REPORT2, + HIS_REPORT, + C2H_PACKET, +}; + struct rtlwifi_tx_info { int sn; unsigned long send_time; -- cgit v1.2.3 From 249155973f524a284a86582ff9a915be045ab760 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:29:57 +0800 Subject: rtlwifi: rename register-based C2H command IDs to V0 Current chips use packet-based C2H commands whose IDs differ from old ones, so this commit simply gives C2H_V0_ as prefix of command IDs. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c | 6 +++--- .../net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c index ec9bcf32f0ab..788de88ab1ee 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c @@ -1749,13 +1749,13 @@ void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw) switch (c2h_event.cmd_id) { - case C2H_BT_RSSI: + case C2H_V0_BT_RSSI: break; - case C2H_BT_OP_MODE: + case C2H_V0_BT_OP_MODE: break; - case BT_INFO: + case C2H_V0_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "BT info Byte[0] (ID) is 0x%x\n", c2h_event.cmd_id); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h index 3723d7476717..756868897d8b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h @@ -130,17 +130,17 @@ enum bt_state { BT_INFO_STATE_MAX = 7 }; -enum rtl8723e_c2h_evt { - C2H_DBG = 0, - C2H_TSF = 1, - C2H_AP_RPT_RSP = 2, +enum rtl8723e_c2h_evt_v0 { + C2H_V0_DBG = 0, + C2H_V0_TSF = 1, + C2H_V0_AP_RPT_RSP = 2, /* The FW notify the report of the specific tx packet. */ - C2H_CCX_TX_RPT = 3, - C2H_BT_RSSI = 4, - C2H_BT_OP_MODE = 5, - C2H_HW_INFO_EXCH = 10, - C2H_C2H_H2C_TEST = 11, - BT_INFO = 12, + C2H_V0_CCX_TX_RPT = 3, + C2H_V0_BT_RSSI = 4, + C2H_V0_BT_OP_MODE = 5, + C2H_V0_HW_INFO_EXCH = 10, + C2H_V0_C2H_H2C_TEST = 11, + C2H_V0_BT_INFO = 12, MAX_C2HEVENT }; -- cgit v1.2.3 From 7aeb100b7d70973e1478f472b0a2fc569637ec8d Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:29:58 +0800 Subject: rtlwifi: remove duplicate C2H definition Move C2H definition to wifi.h, because the definitions of 8192ee, 8723be and 8821ae are the same. 
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 16 ++++++++-------- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h | 11 ----------- drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c | 12 ++++++------ drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h | 10 ---------- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 12 ++++++------ drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h | 14 -------------- drivers/net/wireless/realtek/rtlwifi/wifi.h | 19 +++++++++++++++++++ 7 files changed, 39 insertions(+), 55 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index f9563ae301ad..25d6c32f66c3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -892,34 +892,34 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { - case C2H_8192E_DBG: + case C2H_DBG: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_DBG!!\n"); break; - case C2H_8192E_TXBF: + case C2H_TXBF: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8192E_TXBF!!\n"); break; - case C2H_8192E_TX_REPORT: + case C2H_TX_REPORT: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE , "[C2H], C2H_8723BE_TX_REPORT!\n"); rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); break; - case C2H_8192E_BT_INFO: + case C2H_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_INFO!!\n"); if (rtlpriv->cfg->ops->get_btc_status()) btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, c2h_cmd_len); break; - case C2H_8192E_BT_MP: + case C2H_BT_MP: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_MP!!\n"); if (rtlpriv->cfg->ops->get_btc_status()) btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, c2h_cmd_len); break; - case C2H_8192E_RA_RPT: + case C2H_RA_RPT: _rtl92ee_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); break; default: @@ -948,8 +948,8 @@ void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); switch (c2h_cmd_id) { - case C2H_8192E_BT_INFO: - case C2H_8192E_BT_MP: + case C2H_BT_INFO: + case C2H_BT_MP: rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); break; default: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h index b770f722daa6..8325adaa9663 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h @@ -128,17 +128,6 @@ enum rtl8192e_h2c_cmd { MAX_92E_H2CCMD }; -enum rtl8192e_c2h_evt { - C2H_8192E_DBG = 0, - C2H_8192E_LB = 1, - C2H_8192E_TXBF = 2, - C2H_8192E_TX_REPORT = 3, - C2H_8192E_BT_INFO = 9, - C2H_8192E_BT_MP = 11, - C2H_8192E_RA_RPT = 12, - MAX_8192E_C2HEVENT -}; - #define pagenum_128(_len) \ (u32)(((_len) >> 7) + ((_len) & 0x7F ? 
1 : 0)) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index 4b963fd27d64..34703b9cf5e8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -712,23 +712,23 @@ void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { - case C2H_8723B_DBG: + case C2H_DBG: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_DBG!!\n"); break; - case C2H_8723B_TX_REPORT: + case C2H_TX_REPORT: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_TX_REPORT!\n"); rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); break; - case C2H_8723B_BT_INFO: + case C2H_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_INFO!!\n"); if (rtlpriv->cfg->ops->get_btc_status()) btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, c2h_cmd_len); break; - case C2H_8723B_BT_MP: + case C2H_BT_MP: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_MP!!\n"); if (rtlpriv->cfg->ops->get_btc_status()) @@ -761,8 +761,8 @@ void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); switch (c2h_cmd_id) { - case C2H_8723B_BT_INFO: - case C2H_8723B_BT_MP: + case C2H_BT_INFO: + case C2H_BT_MP: rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h index 2de4edb62bca..8f49941028f4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h @@ -100,16 +100,6 @@ enum rtl8723b_h2c_cmd { MAX_8723B_H2CCMD }; -enum rtl8723b_c2h_evt { - C2H_8723B_DBG = 0, - C2H_8723B_LB = 1, - C2H_8723B_TXBF = 2, - C2H_8723B_TX_REPORT = 3, - C2H_8723B_BT_INFO = 9, - C2H_8723B_BT_MP = 11, - MAX_8723B_C2HEVENT -}; - #define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 
1 : 0)) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index f2b2c549e5b2..5c7e58fbc07e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1926,23 +1926,23 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { - case C2H_8812_DBG: + case C2H_DBG: RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_8812_DBG!!\n"); break; - case C2H_8812_TX_REPORT: + case C2H_TX_REPORT: rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); break; - case C2H_8812_RA_RPT: + case C2H_RA_RPT: rtl8821ae_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); break; - case C2H_8812_BT_INFO: + case C2H_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_8812_BT_INFO!!\n"); if (rtlpriv->cfg->ops->get_btc_status()) btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, c2h_cmd_len); break; - case C2H_8812_BT_MP: + case C2H_BT_MP: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8812_BT_MP!!\n"); if (rtlpriv->cfg->ops->get_btc_status()) @@ -1974,7 +1974,7 @@ void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); switch (c2h_cmd_id) { - case C2H_8812_BT_INFO: + case C2H_BT_INFO: rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h index 32d46d7128f5..48c72a26c7e8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h @@ -137,20 +137,6 @@ #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -enum rtl8812_c2h_evt { - C2H_8812_DBG = 0, - C2H_8812_LB = 1, - C2H_8812_TXBF = 2, - C2H_8812_TX_REPORT = 3, - C2H_8812_BT_INFO = 9, - C2H_8812_BT_MP = 11, - C2H_8812_RA_RPT = 12, - - C2H_8812_FW_SWCHNL = 0x10, - C2H_8812_IQK_FINISH = 0x11, - MAX_8812_C2HEVENT -}; - enum rtl8821a_h2c_cmd { H2C_8821AE_RSVDPAGE = 0, H2C_8821AE_MSRRPT = 1, diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 81ac036760fc..3da8e2a6084a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -158,6 +158,25 @@ enum { H2C_BT_PORT_ID = 0x71, }; +enum rtl_c2h_evt_v1 { + C2H_DBG = 0, + C2H_LB = 1, + C2H_TXBF = 2, + C2H_TX_REPORT = 3, + C2H_BT_INFO = 9, + C2H_BT_MP = 11, + C2H_RA_RPT = 12, + + C2H_FW_SWCHNL = 0x10, + C2H_IQK_FINISH = 0x11, + + C2H_EXT_V2 = 0xFF, +}; + +enum rtl_c2h_evt_v2 { + C2H_V2_CCX_RPT = 0x0F, +}; + #define GET_TX_REPORT_SN_V1(c2h) (c2h[6]) #define GET_TX_REPORT_ST_V1(c2h) (c2h[0] & 0xC0) #define GET_TX_REPORT_RETRY_V1(c2h) (c2h[2] & 0x3F) -- cgit v1.2.3 From 16e78cb4a571129bbef5457c68856ff3d3895753 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:29:59 +0800 Subject: rtlwifi: remove unused fw C2H command ID The IDs are defined by driver and map to the fw C2H IDs, but they aren't used now result in removal. 
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8188ee/def.h | 25 ---------------------- .../net/wireless/realtek/rtlwifi/rtl8192ce/def.h | 25 ---------------------- .../net/wireless/realtek/rtlwifi/rtl8723ae/def.h | 25 ---------------------- .../net/wireless/realtek/rtlwifi/rtl8821ae/def.h | 25 ---------------------- 4 files changed, 100 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h index 32492d64d685..45c866d3ca88 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h @@ -176,31 +176,6 @@ enum interface_select_pci { INTF_SEL3_RSV = 3, }; -enum hal_fw_c2h_cmd_id { - HAL_FW_C2H_CMD_READ_MACREG = 0, - HAL_FW_C2H_CMD_READ_BBREG = 1, - HAL_FW_C2H_CMD_READ_RFREG = 2, - HAL_FW_C2H_CMD_READ_EEPROM = 3, - HAL_FW_C2H_CMD_READ_EFUSE = 4, - HAL_FW_C2H_CMD_READ_CAM = 5, - HAL_FW_C2H_CMD_GET_BASICRATE = 6, - HAL_FW_C2H_CMD_GET_DATARATE = 7, - HAL_FW_C2H_CMD_SURVEY = 8, - HAL_FW_C2H_CMD_SURVEYDONE = 9, - HAL_FW_C2H_CMD_JOINBSS = 10, - HAL_FW_C2H_CMD_ADDSTA = 11, - HAL_FW_C2H_CMD_DELSTA = 12, - HAL_FW_C2H_CMD_ATIMDONE = 13, - HAL_FW_C2H_CMD_TX_REPORT = 14, - HAL_FW_C2H_CMD_CCX_REPORT = 15, - HAL_FW_C2H_CMD_DTM_REPORT = 16, - HAL_FW_C2H_CMD_TX_RATE_STATISTICS = 17, - HAL_FW_C2H_CMD_C2HLBK = 18, - HAL_FW_C2H_CMD_C2HDBG = 19, - HAL_FW_C2H_CMD_C2HFEEDBACK = 20, - HAL_FW_C2H_CMD_MAX -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h index b90aaf128072..d2005d7e9ad2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h @@ -142,31 +142,6 @@ enum interface_select_pci { INTF_SEL3_RSV = 3, }; -enum hal_fw_c2h_cmd_id { - HAL_FW_C2H_CMD_Read_MACREG = 0, - HAL_FW_C2H_CMD_Read_BBREG = 1, - HAL_FW_C2H_CMD_Read_RFREG = 2, - HAL_FW_C2H_CMD_Read_EEPROM = 3, - HAL_FW_C2H_CMD_Read_EFUSE = 4, - HAL_FW_C2H_CMD_Read_CAM = 5, - HAL_FW_C2H_CMD_Get_BasicRate = 6, - HAL_FW_C2H_CMD_Get_DataRate = 7, - HAL_FW_C2H_CMD_Survey = 8, - HAL_FW_C2H_CMD_SurveyDone = 9, - HAL_FW_C2H_CMD_JoinBss = 10, - HAL_FW_C2H_CMD_AddSTA = 11, - HAL_FW_C2H_CMD_DelSTA = 12, - HAL_FW_C2H_CMD_AtimDone = 13, - HAL_FW_C2H_CMD_TX_Report = 14, - HAL_FW_C2H_CMD_CCX_Report = 15, - HAL_FW_C2H_CMD_DTM_Report = 16, - HAL_FW_C2H_CMD_TX_Rate_Statistics = 17, - HAL_FW_C2H_CMD_C2HLBK = 18, - HAL_FW_C2H_CMD_C2HDBG = 19, - HAL_FW_C2H_CMD_C2HFEEDBACK = 20, - HAL_FW_C2H_CMD_MAX -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h index bcdf2273688e..847544817549 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h @@ -152,31 +152,6 @@ enum interface_select_pci { INTF_SEL3_RSV = 3, }; -enum hal_fw_c2h_cmd_id { - HAL_FW_C2H_CMD_Read_MACREG = 0, - HAL_FW_C2H_CMD_Read_BBREG = 1, - HAL_FW_C2H_CMD_Read_RFREG = 2, - HAL_FW_C2H_CMD_Read_EEPROM = 3, - HAL_FW_C2H_CMD_Read_EFUSE = 4, - HAL_FW_C2H_CMD_Read_CAM = 5, - HAL_FW_C2H_CMD_Get_BasicRate = 6, - HAL_FW_C2H_CMD_Get_DataRate = 7, - HAL_FW_C2H_CMD_Survey = 8, - HAL_FW_C2H_CMD_SurveyDone = 9, - HAL_FW_C2H_CMD_JoinBss = 10, - HAL_FW_C2H_CMD_AddSTA = 11, - HAL_FW_C2H_CMD_DelSTA = 12, - HAL_FW_C2H_CMD_AtimDone = 13, - 
HAL_FW_C2H_CMD_TX_Report = 14, - HAL_FW_C2H_CMD_CCX_Report = 15, - HAL_FW_C2H_CMD_DTM_Report = 16, - HAL_FW_C2H_CMD_TX_Rate_Statistics = 17, - HAL_FW_C2H_CMD_C2HLBK = 18, - HAL_FW_C2H_CMD_C2HDBG = 19, - HAL_FW_C2H_CMD_C2HFEEDBACK = 20, - HAL_FW_C2H_CMD_MAX -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h index 498f716bfc73..3fe3aaa5fe3c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h @@ -296,31 +296,6 @@ enum interface_select_pci { INTF_SEL3_RSV = 3, }; -enum hal_fw_c2h_cmd_id { - HAL_FW_C2H_CMD_READ_MACREG = 0, - HAL_FW_C2H_CMD_READ_BBREG = 1, - HAL_FW_C2H_CMD_READ_RFREG = 2, - HAL_FW_C2H_CMD_READ_EEPROM = 3, - HAL_FW_C2H_CMD_READ_EFUSE = 4, - HAL_FW_C2H_CMD_READ_CAM = 5, - HAL_FW_C2H_CMD_GET_BASICRATE = 6, - HAL_FW_C2H_CMD_GET_DATARATE = 7, - HAL_FW_C2H_CMD_SURVEY = 8, - HAL_FW_C2H_CMD_SURVEYDONE = 9, - HAL_FW_C2H_CMD_JOINBSS = 10, - HAL_FW_C2H_CMD_ADDSTA = 11, - HAL_FW_C2H_CMD_DELSTA = 12, - HAL_FW_C2H_CMD_ATIMDONE = 13, - HAL_FW_C2H_CMD_TX_REPORT = 14, - HAL_FW_C2H_CMD_CCX_REPORT = 15, - HAL_FW_C2H_CMD_DTM_REPORT = 16, - HAL_FW_C2H_CMD_TX_RATE_STATISTICS = 17, - HAL_FW_C2H_CMD_C2HLBK = 18, - HAL_FW_C2H_CMD_C2HDBG = 19, - HAL_FW_C2H_CMD_C2HFEEDBACK = 20, - HAL_FW_C2H_CMD_MAX -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, -- cgit v1.2.3 From ff77960d62d220a0f16433e9c2664a8759f95c3a Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:30:00 +0800 Subject: rtlwifi: remove dummy hal_op rx_command_packet from rtl8188ee and rtl8723ae The caller of hal_op rx_command_packet will assert function pointer before calling, so we can remove dummy functions safely. 
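The rationale above is the usual pattern for optional ops callbacks: the core tests the function pointer and treats a NULL entry as "nothing to do", so per-chip dummies that only return 0 add no value. Below is a minimal user-space sketch of that pattern; the names (demo_ops, chip_a, chip_b, rx_path) are invented and this is not the driver code itself.

#include <stdio.h>
#include <stddef.h>

struct demo_ops {
	int (*rx_command_packet)(const unsigned char *data, unsigned int len);
};

static int chip_a_rx_command_packet(const unsigned char *data, unsigned int len)
{
	(void)data;
	printf("chip A consumed %u command bytes\n", len);
	return 1;
}

/* chip B does no command-packet handling, so it leaves the pointer NULL
 * instead of registering a dummy that always returns 0 */
static const struct demo_ops chip_a = { .rx_command_packet = chip_a_rx_command_packet };
static const struct demo_ops chip_b = { .rx_command_packet = NULL };

static void rx_path(const struct demo_ops *ops,
		    const unsigned char *data, unsigned int len)
{
	/* the guard that makes the per-chip dummies unnecessary */
	if (ops->rx_command_packet && ops->rx_command_packet(data, len))
		return;	/* handled as a command packet */

	printf("passed %u bytes up as a normal frame\n", len);
}

int main(void)
{
	unsigned char frame[4] = { 0xde, 0xad, 0xbe, 0xef };

	rx_path(&chip_a, frame, sizeof(frame));
	rx_path(&chip_b, frame, sizeof(frame));
	return 0;
}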
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 2 -- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 7 ------- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h | 4 ---- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 1 - drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c | 7 ------- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h | 3 --- 6 files changed, 24 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 82681b96ef93..8c15ffd3568b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -263,8 +263,6 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = { .get_rfreg = rtl88e_phy_query_rf_reg, .set_rfreg = rtl88e_phy_set_rf_reg, .get_btc_status = rtl88e_get_btc_status, - .rx_command_packet = rtl88ee_rx_command_packet, - }; static struct rtl_mod_params rtl88ee_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 9670732b2bc6..4c1f8b08fc10 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -850,10 +850,3 @@ void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } - -u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - return 0; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index f902d6769aa8..127ba977206f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -790,8 +790,4 @@ void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); - #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index a545ea317323..07b82700d1de 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -260,7 +260,6 @@ static struct rtl_hal_ops rtl8723e_hal_ops = { .bt_coex_off_before_lps = rtl8723e_dm_bt_turn_off_bt_coexist_before_enter_lps, .get_btc_status = rtl8723e_get_btc_status, - .rx_command_packet = rtl8723e_rx_command_packet, .is_fw_header = is_fw_header, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 23485602a9a1..d461d0c9631f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -709,10 +709,3 @@ void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } - -u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - return 0; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h index 985ce0b77ea5..d592b08d4ac8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h @@ -716,7 +716,4 @@ void 
rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); #endif -- cgit v1.2.3 From 16cefa449c9ced7006860d5427b9595782248ca4 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:30:01 +0800 Subject: rtlwifi: Add hal_op c2h_ra_report_handler for special process We're going to merge C2H handler into one, but one special case is to handle RA_REPORT that implements in individual IC folder. So this commit adds a hal_op for caller in common code. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 7 ++++--- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h | 2 ++ drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 1 + drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 5 +++-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h | 2 ++ drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 1 + drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 ++ 7 files changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 25d6c32f66c3..a2d9e217bc65 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -876,8 +876,8 @@ void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) (u8 *)p2p_ps_offload); } -static void _rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, - u8 *cmd_buf, u8 cmd_len) +void rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len) { u8 rate = cmd_buf[0] & 0x3F; bool collision_state = cmd_buf[3] & BIT(0); @@ -889,6 +889,7 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, u8 c2h_cmd_len, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { @@ -920,7 +921,7 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, c2h_cmd_len); break; case C2H_RA_RPT: - _rtl92ee_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); + hal_ops->c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); break; default: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h index 8325adaa9663..e5fae0a86a27 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h @@ -182,4 +182,6 @@ void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, u8 c2h_cmd_len, u8 *tmp_buf); +void rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index ef92a789871d..c5c26b537cd2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -252,6 +252,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = { .get_btc_status = rtl92ee_get_btc_status, .rx_command_packet 
= rtl92ee_rx_command_packet, .c2h_content_parsing = rtl92ee_c2h_content_parsing, + .c2h_ra_report_handler = rtl92ee_c2h_ra_report_handler, }; static struct rtl_mod_params rtl92ee_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 5c7e58fbc07e..bf37c428c682 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1907,7 +1907,7 @@ void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) H2C_8821AE_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); } -static void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, +void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, u8 *cmd_buf, u8 cmd_len) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1923,6 +1923,7 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { @@ -1933,7 +1934,7 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); break; case C2H_RA_RPT: - rtl8821ae_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); + hal_ops->c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); break; case C2H_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h index 48c72a26c7e8..a69448be7c26 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h @@ -322,4 +322,6 @@ void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, u8 c2h_cmd_len, u8 *tmp_buf); +void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 9bb3d9dfce79..4b2cb6bda4f1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -305,6 +305,7 @@ static struct rtl_hal_ops rtl8821ae_hal_ops = { .get_btc_status = rtl8821ae_get_btc_status, .rx_command_packet = rtl8821ae_rx_command_packet, .c2h_content_parsing = rtl8821ae_c2h_content_parsing, + .c2h_ra_report_handler = rtl8821ae_c2h_ra_report_handler, .add_wowlan_pattern = rtl8821ae_add_wowlan_pattern, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 3da8e2a6084a..392b3e84db7e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2348,6 +2348,8 @@ struct rtl_hal_ops { u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx); void (*c2h_content_parsing)(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val); + void (*c2h_ra_report_handler)(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len); }; struct rtl_intf_ops { -- cgit v1.2.3 From daf026ae5fbe094e63bb729d437b042a0a5fd798 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:30:02 +0800 Subject: rtlwifi: remove duplicate C2H handler Merge duplicate C2H handler and implement the handler in base.c. 
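Taken together with the previous patch, the resulting shape is a single common parser that switches on the C2H command id and defers only the rate-adaptation report to a per-chip callback. The stand-alone sketch below illustrates that structure: the command ids mirror enum rtl_c2h_evt_v1 and the rate mask follows the RA-report handlers in the diffs, but the buffer contents, handler bodies and names (demo_hal_ops, chip_ra_report) are invented.

#include <stdio.h>
#include <stdint.h>

enum { C2H_DBG = 0, C2H_TX_REPORT = 3, C2H_RA_RPT = 12 };

struct demo_hal_ops {
	void (*c2h_ra_report_handler)(const uint8_t *buf, uint8_t len);
};

/* per-chip piece: only the RA report differs between chips */
static void chip_ra_report(const uint8_t *buf, uint8_t len)
{
	printf("RA report: rate index 0x%02x (%u payload bytes)\n",
	       buf[0] & 0x3F, len);
}

/* common piece: one parser for every chip */
static void c2h_content_parsing(const struct demo_hal_ops *ops,
				const uint8_t *c2h, uint8_t len)
{
	uint8_t cmd_id = c2h[0];		/* byte 0: command id */
	const uint8_t *payload = c2h + 2;	/* byte 1 is a sequence number */
	uint8_t payload_len = len - 2;

	switch (cmd_id) {
	case C2H_DBG:
		printf("firmware debug message, %u bytes\n", payload_len);
		break;
	case C2H_TX_REPORT:
		printf("TX report, %u bytes\n", payload_len);
		break;
	case C2H_RA_RPT:
		if (ops->c2h_ra_report_handler)
			ops->c2h_ra_report_handler(payload, payload_len);
		break;
	default:
		printf("unhandled C2H id %u\n", cmd_id);
		break;
	}
}

int main(void)
{
	struct demo_hal_ops ops = { .c2h_ra_report_handler = chip_ra_report };
	uint8_t ra_rpt[] = { C2H_RA_RPT, 0x01, 0x0b, 0x00, 0x00, 0x01 };

	c2h_content_parsing(&ops, ra_rpt, sizeof(ra_rpt));
	return 0;
}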
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 72 +++++++++++++++++++++ drivers/net/wireless/realtek/rtlwifi/base.h | 3 + .../net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 75 ---------------------- .../net/wireless/realtek/rtlwifi/rtl8192ee/fw.h | 3 - .../net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8723be/fw.c | 69 -------------------- .../net/wireless/realtek/rtlwifi/rtl8723be/fw.h | 3 - .../net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 3 +- .../net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 68 -------------------- .../net/wireless/realtek/rtlwifi/rtl8821ae/fw.h | 5 -- .../net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 2 +- 14 files changed, 81 insertions(+), 230 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 759a802ccbee..0d03e98f9cb4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2306,6 +2306,78 @@ label_err: } EXPORT_SYMBOL(rtl_c2hcmd_enqueue); +void rtl_c2h_content_parsing(struct ieee80211_hw *hw, u8 cmd_id, + u8 cmd_len, u8 *cmd_buf) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; + const struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; + + switch (cmd_id) { + case C2H_DBG: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n"); + break; + case C2H_TXBF: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_TXBF!!\n"); + break; + case C2H_TX_REPORT: + rtl_tx_report_handler(hw, cmd_buf, cmd_len); + break; + case C2H_RA_RPT: + if (hal_ops->c2h_ra_report_handler) + hal_ops->c2h_ra_report_handler(hw, cmd_buf, cmd_len); + break; + case C2H_BT_INFO: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_BT_INFO!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btinfo_notify(rtlpriv, cmd_buf, cmd_len); + break; + case C2H_BT_MP: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, cmd_buf, cmd_len); + break; + default: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], Unknown packet!! 
cmd_id(%#X)!\n", cmd_id); + break; + } +} +EXPORT_SYMBOL_GPL(rtl_c2h_content_parsing); + +void rtl_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; + u8 *tmp_buf = NULL; + + c2h_cmd_id = buffer[0]; + c2h_cmd_seq = buffer[1]; + c2h_cmd_len = len - 2; + tmp_buf = buffer + 2; + + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", + c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); + + RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); + + switch (c2h_cmd_id) { + case C2H_BT_INFO: + case C2H_BT_MP: + rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); + break; + default: + rtl_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); + break; + } +} +EXPORT_SYMBOL_GPL(rtl_c2h_packet_handler); + void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 19e7477839e4..3bf174e5b07e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -162,6 +162,9 @@ void rtl_fwevt_wq_callback(void *data); void rtl_c2hcmd_wq_callback(void *data); void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec); void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val); +void rtl_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, + u8 c2h_cmd_len, u8 *tmp_buf); +void rtl_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index, enum wireless_mode wirelessmode); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index a2d9e217bc65..84a0d0eb72e1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -884,78 +884,3 @@ void rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, rtl92ee_dm_dynamic_arfb_select(hw, rate, collision_state); } - -void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; - struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - - switch (c2h_cmd_id) { - case C2H_DBG: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_DBG!!\n"); - break; - case C2H_TXBF: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8192E_TXBF!!\n"); - break; - case C2H_TX_REPORT: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE , - "[C2H], C2H_8723BE_TX_REPORT!\n"); - rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - case C2H_BT_INFO: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_BT_INFO!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - case C2H_BT_MP: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_BT_MP!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - case C2H_RA_RPT: - hal_ops->c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - default: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], Unknown packet!! 
CmdId(%#X)!\n", c2h_cmd_id); - break; - } -} - -void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; - u8 *tmp_buf = NULL; - - c2h_cmd_id = buffer[0]; - c2h_cmd_seq = buffer[1]; - c2h_cmd_len = len - 2; - tmp_buf = buffer + 2; - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", - c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); - - RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - - switch (c2h_cmd_id) { - case C2H_BT_INFO: - case C2H_BT_MP: - rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); - break; - default: - rtl92ee_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, - tmp_buf); - break; - } -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h index e5fae0a86a27..6a2fbf20d579 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h @@ -179,9 +179,6 @@ void rtl92ee_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus); void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); -void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); -void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf); void rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, u8 *cmd_buf, u8 cmd_len); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index c5c26b537cd2..fd028274c593 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -251,7 +251,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = { .fill_h2c_cmd = rtl92ee_fill_h2c_cmd, .get_btc_status = rtl92ee_get_btc_status, .rx_command_packet = rtl92ee_rx_command_packet, - .c2h_content_parsing = rtl92ee_c2h_content_parsing, + .c2h_content_parsing = rtl_c2h_content_parsing, .c2h_ra_report_handler = rtl92ee_c2h_ra_report_handler, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 852a2701ef55..e525c2bb4457 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -1085,7 +1085,7 @@ u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, result = 0; break; case C2H_PACKET: - rtl92ee_c2h_packet_handler(hw, skb->data, (u8)skb->len); + rtl_c2h_packet_handler(hw, skb->data, (u8)skb->len); result = 1; break; default: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index 34703b9cf5e8..f2441fbb92f1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -703,72 +703,3 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, rtl8723be_fill_h2c_cmd(hw, H2C_8723B_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); } - -void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, - u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - - switch 
(c2h_cmd_id) { - case C2H_DBG: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_DBG!!\n"); - break; - case C2H_TX_REPORT: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_TX_REPORT!\n"); - rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - case C2H_BT_INFO: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_BT_INFO!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - case C2H_BT_MP: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_BT_MP!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - default: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id); - break; - } -} - -void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; - u8 *tmp_buf = NULL; - - c2h_cmd_id = buffer[0]; - c2h_cmd_seq = buffer[1]; - c2h_cmd_len = len - 2; - tmp_buf = buffer + 2; - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", - c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); - - RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - - switch (c2h_cmd_id) { - case C2H_BT_INFO: - case C2H_BT_MP: - rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); - break; - - default: - rtl8723be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, - tmp_buf); - break; - } -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h index 8f49941028f4..948d28646364 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h @@ -143,7 +143,4 @@ void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus); void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); -void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); -void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 6a42988aad65..9df994965c4a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -263,7 +263,7 @@ static struct rtl_hal_ops rtl8723be_hal_ops = { .get_btc_status = rtl8723be_get_btc_status, .rx_command_packet = rtl8723be_rx_command_packet, .is_fw_header = is_fw_header, - .c2h_content_parsing = rtl8723be_c2h_content_parsing, + .c2h_content_parsing = rtl_c2h_content_parsing, }; static struct rtl_mod_params rtl8723be_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index deb8f9501b51..0c0cece9d5f9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -774,8 +774,7 @@ u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw, result = 0; break; case C2H_PACKET: - rtl8723be_c2h_packet_handler(hw, skb->data, - (u8)skb->len); + rtl_c2h_packet_handler(hw, skb->data, (u8)skb->len); 
result = 1; break; default: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index bf37c428c682..d868a034659f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1917,71 +1917,3 @@ void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, rtl8821ae_dm_update_init_rate(hw, rate); } - -void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, - u8 c2h_cmd_id, u8 c2h_cmd_len, - u8 *tmp_buf) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; - struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - - switch (c2h_cmd_id) { - case C2H_DBG: - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_8812_DBG!!\n"); - break; - case C2H_TX_REPORT: - rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - case C2H_RA_RPT: - hal_ops->c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - case C2H_BT_INFO: - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, - "[C2H], C2H_8812_BT_INFO!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - case C2H_BT_MP: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8812_BT_MP!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - default: - break; - } -} - -void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, - u8 length) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; - u8 *tmp_buf = NULL; - - c2h_cmd_id = buffer[0]; - c2h_cmd_seq = buffer[1]; - c2h_cmd_len = length - 2; - tmp_buf = buffer + 2; - - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, - "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", - c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); - - RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, - "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - - switch (c2h_cmd_id) { - case C2H_BT_INFO: - rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); - break; - - default: - rtl8821ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, - tmp_buf); - break; - } -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h index a69448be7c26..99c902ff0b84 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h @@ -317,11 +317,6 @@ void rtl8821ae_set_fw_keep_alive_cmd(struct ieee80211_hw *hw, bool func_en); void rtl8821ae_set_fw_disconnect_decision_ctrl_cmd(struct ieee80211_hw *hw, bool enabled); void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw); -void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, - u8 *buffer, u8 length); -void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, - u8 c2h_cmd_id, u8 c2h_cmd_len, - u8 *tmp_buf); void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, u8 *cmd_buf, u8 cmd_len); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 4b2cb6bda4f1..bcabf4dc6353 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -304,7 +304,7 @@ static struct rtl_hal_ops rtl8821ae_hal_ops = { .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd, .get_btc_status = rtl8821ae_get_btc_status, .rx_command_packet = rtl8821ae_rx_command_packet, - .c2h_content_parsing = 
rtl8821ae_c2h_content_parsing, + .c2h_content_parsing = rtl_c2h_content_parsing, .c2h_ra_report_handler = rtl8821ae_c2h_ra_report_handler, .add_wowlan_pattern = rtl8821ae_add_wowlan_pattern, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 63fb80039f82..a93d8569c186 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -1018,7 +1018,7 @@ u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw, result = 0; break; case C2H_PACKET: - rtl8821ae_c2h_packet_handler(hw, skb->data, (u8)skb->len); + rtl_c2h_packet_handler(hw, skb->data, (u8)skb->len); result = 1; RT_TRACE(rtlpriv, COMP_RECV, DBG_LOUD, "skb->len=%d\n\n", skb->len); -- cgit v1.2.3 From 23ffab177b1da1564c14214f290925a77f0b3592 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:30:03 +0800 Subject: rtlwifi: remove hal_op rx_command_packet Because the hal_op rx_command_packet does C2H handler if rx packet type is C2H, and the handler have been moved to base.c so we can call the handler directly. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/pci.c | 4 ++-- .../net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 1 - .../net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 24 -------------------- .../net/wireless/realtek/rtlwifi/rtl8192ee/trx.h | 3 --- .../net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 1 - .../net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 24 -------------------- .../net/wireless/realtek/rtlwifi/rtl8723be/trx.h | 3 --- .../net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 1 - .../net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 26 ---------------------- .../net/wireless/realtek/rtlwifi/rtl8821ae/trx.h | 3 --- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 -- 11 files changed, 2 insertions(+), 90 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index d0c509ef790e..dd51c67c09fa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -830,8 +830,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) goto new_trx_end; } /* handle command packet here */ - if (rtlpriv->cfg->ops->rx_command_packet && - rtlpriv->cfg->ops->rx_command_packet(hw, &stats, skb)) { + if (stats.packet_report_type == C2H_PACKET) { + rtl_c2h_packet_handler(hw, skb->data, (u8)skb->len); dev_kfree_skb_any(skb); goto new_trx_end; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index fd028274c593..5b67ad748d67 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -250,7 +250,6 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = { .set_rfreg = rtl92ee_phy_set_rf_reg, .fill_h2c_cmd = rtl92ee_fill_h2c_cmd, .get_btc_status = rtl92ee_get_btc_status, - .rx_command_packet = rtl92ee_rx_command_packet, .c2h_content_parsing = rtl_c2h_content_parsing, .c2h_ra_report_handler = rtl92ee_c2h_ra_report_handler, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index e525c2bb4457..14d6e3fc5767 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -1072,27 +1072,3 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 
index) void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) { } - -u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - u32 result = 0; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - switch (status->packet_report_type) { - case NORMAL_RX: - result = 0; - break; - case C2H_PACKET: - rtl_c2h_packet_handler(hw, skb->data, (u8)skb->len); - result = 1; - break; - default: - RT_TRACE(rtlpriv, COMP_RECV, DBG_TRACE, - "Unknown packet type %d\n", status->packet_report_type); - break; - } - - return result; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h index 48c16fff20c6..45df3e79f490 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h @@ -762,7 +762,4 @@ void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 9df994965c4a..a41e67b3f38b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -261,7 +261,6 @@ static struct rtl_hal_ops rtl8723be_hal_ops = { .set_rfreg = rtl8723be_phy_set_rf_reg, .fill_h2c_cmd = rtl8723be_fill_h2c_cmd, .get_btc_status = rtl8723be_get_btc_status, - .rx_command_packet = rtl8723be_rx_command_packet, .is_fw_header = is_fw_header, .c2h_content_parsing = rtl_c2h_content_parsing, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 0c0cece9d5f9..9f8dfb5af774 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -761,27 +761,3 @@ void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } - -u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - u32 result = 0; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - switch (status->packet_report_type) { - case NORMAL_RX: - result = 0; - break; - case C2H_PACKET: - rtl_c2h_packet_handler(hw, skb->data, (u8)skb->len); - result = 1; - break; - default: - RT_TRACE(rtlpriv, COMP_RECV, DBG_TRACE, - "No this packet type!!\n"); - break; - } - - return result; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h index 988bf0586674..609f7ad7f787 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h @@ -632,7 +632,4 @@ void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index bcabf4dc6353..8ff8a406db52 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -303,7 +303,6 @@ static 
struct rtl_hal_ops rtl8821ae_hal_ops = { .set_rfreg = rtl8821ae_phy_set_rf_reg, .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd, .get_btc_status = rtl8821ae_get_btc_status, - .rx_command_packet = rtl8821ae_rx_command_packet, .c2h_content_parsing = rtl_c2h_content_parsing, .c2h_ra_report_handler = rtl8821ae_c2h_ra_report_handler, .add_wowlan_pattern = rtl8821ae_add_wowlan_pattern, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index a93d8569c186..d7960dd5bf1a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -1005,29 +1005,3 @@ void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } - -u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - u32 result = 0; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - switch (status->packet_report_type) { - case NORMAL_RX: - result = 0; - break; - case C2H_PACKET: - rtl_c2h_packet_handler(hw, skb->data, (u8)skb->len); - result = 1; - RT_TRACE(rtlpriv, COMP_RECV, DBG_LOUD, - "skb->len=%d\n\n", skb->len); - break; - default: - RT_TRACE(rtlpriv, COMP_RECV, DBG_LOUD, - "No this packet type!!\n"); - break; - } - - return result; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h index 221dd2b29d3b..4ff0968dba81 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h @@ -628,7 +628,4 @@ void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 392b3e84db7e..b40d8f5bbdce 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2340,8 +2340,6 @@ struct rtl_hal_ops { void (*set_default_port_id_cmd)(struct ieee80211_hw *hw); bool (*get_btc_status) (void); bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr); - u32 (*rx_command_packet)(struct ieee80211_hw *hw, - const struct rtl_stats *status, struct sk_buff *skb); void (*add_wowlan_pattern)(struct ieee80211_hw *hw, struct rtl_wow_pattern *rtl_pattern, u8 index); -- cgit v1.2.3 From b4f6ee489cff1e9c8637996b7c1079e8bdd979df Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:30:04 +0800 Subject: rtlwifi: remove hal_op c2h_content_parsing Similar to rx_command_packet, we can call rtl_c2h_content_parsing so the hal_op isn't necessary. 
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 7 +++---- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 1 - drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 1 - drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 1 - drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 -- 5 files changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 0d03e98f9cb4..927b7d231576 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2346,7 +2346,6 @@ void rtl_c2h_content_parsing(struct ieee80211_hw *hw, u8 cmd_id, break; } } -EXPORT_SYMBOL_GPL(rtl_c2h_content_parsing); void rtl_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) { @@ -2401,9 +2400,9 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec) if (!c2hcmd) break; - if (rtlpriv->cfg->ops->c2h_content_parsing && exec) - rtlpriv->cfg->ops->c2h_content_parsing(hw, - c2hcmd->tag, c2hcmd->len, c2hcmd->val); + if (exec) + rtl_c2h_content_parsing(hw, c2hcmd->tag, + c2hcmd->len, c2hcmd->val); /* free */ kfree(c2hcmd->val); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index 5b67ad748d67..9ea62599ecbb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -250,7 +250,6 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = { .set_rfreg = rtl92ee_phy_set_rf_reg, .fill_h2c_cmd = rtl92ee_fill_h2c_cmd, .get_btc_status = rtl92ee_get_btc_status, - .c2h_content_parsing = rtl_c2h_content_parsing, .c2h_ra_report_handler = rtl92ee_c2h_ra_report_handler, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index a41e67b3f38b..c9f7b042d9c6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -262,7 +262,6 @@ static struct rtl_hal_ops rtl8723be_hal_ops = { .fill_h2c_cmd = rtl8723be_fill_h2c_cmd, .get_btc_status = rtl8723be_get_btc_status, .is_fw_header = is_fw_header, - .c2h_content_parsing = rtl_c2h_content_parsing, }; static struct rtl_mod_params rtl8723be_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 8ff8a406db52..77f6401021c9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -303,7 +303,6 @@ static struct rtl_hal_ops rtl8821ae_hal_ops = { .set_rfreg = rtl8821ae_phy_set_rf_reg, .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd, .get_btc_status = rtl8821ae_get_btc_status, - .c2h_content_parsing = rtl_c2h_content_parsing, .c2h_ra_report_handler = rtl8821ae_c2h_ra_report_handler, .add_wowlan_pattern = rtl8821ae_add_wowlan_pattern, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index b40d8f5bbdce..930e1ec2280f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2344,8 +2344,6 @@ struct rtl_hal_ops { struct rtl_wow_pattern *rtl_pattern, u8 index); u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx); - void (*c2h_content_parsing)(struct ieee80211_hw *hw, u8 tag, u8 len, - u8 *val); void (*c2h_ra_report_handler)(struct ieee80211_hw *hw, u8 *cmd_buf, u8 
cmd_len); }; -- cgit v1.2.3 From 9ae6ed271a60f0e4476fe94f0efc5aba184a9c57 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:30:05 +0800 Subject: rtlwifi: use sk_buff to queue C2H commands We use 'struct rtl_c2hcmd' to store C2H commands originally, and the code is slightly complex to enqueue and dequeue and also wastes time to allocate and memcpy data. Since C2H commands are asynchronous events, they can be processed in work queue, so RX ISR enqueues C2H result in removal of rtl_c2h_packet_handler(). Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 96 +++++++---------------------- drivers/net/wireless/realtek/rtlwifi/base.h | 5 +- drivers/net/wireless/realtek/rtlwifi/pci.c | 3 +- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 +- 4 files changed, 25 insertions(+), 81 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 927b7d231576..61f12f86fcd4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -571,9 +571,9 @@ int rtl_init_core(struct ieee80211_hw *hw) spin_lock_init(&rtlpriv->locks.iqk_lock); /* <5> init list */ INIT_LIST_HEAD(&rtlpriv->entry_list); - INIT_LIST_HEAD(&rtlpriv->c2hcmd_list); INIT_LIST_HEAD(&rtlpriv->scan_list.list); skb_queue_head_init(&rtlpriv->tx_report.queue); + skb_queue_head_init(&rtlpriv->c2hcmd_queue); rtlmac->link_state = MAC80211_NOLINK; @@ -2262,56 +2262,36 @@ void rtl_fwevt_wq_callback(void *data) rtlpriv->cfg->ops->c2h_command_handle(hw); } -void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val) +void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned long flags; - struct rtl_c2hcmd *c2hcmd; - - c2hcmd = kmalloc(sizeof(*c2hcmd), - in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); - - if (!c2hcmd) - goto label_err; - - c2hcmd->val = kmalloc(len, - in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL); - - if (!c2hcmd->val) - goto label_err2; - - /* fill data */ - c2hcmd->tag = tag; - c2hcmd->len = len; - memcpy(c2hcmd->val, val, len); /* enqueue */ spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); - list_add_tail(&c2hcmd->list, &rtlpriv->c2hcmd_list); + __skb_queue_tail(&rtlpriv->c2hcmd_queue, skb); spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); /* wake up wq */ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0); - - return; - -label_err2: - kfree(c2hcmd); - -label_err: - RT_TRACE(rtlpriv, COMP_CMD, DBG_WARNING, - "C2H cmd enqueue fail.\n"); } EXPORT_SYMBOL(rtl_c2hcmd_enqueue); -void rtl_c2h_content_parsing(struct ieee80211_hw *hw, u8 cmd_id, - u8 cmd_len, u8 *cmd_buf) +static void rtl_c2h_content_parsing(struct ieee80211_hw *hw, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; const struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; + u8 cmd_id, cmd_seq, cmd_len; + u8 *cmd_buf = NULL; + + cmd_id = skb->data[0]; + cmd_seq = skb->data[1]; + cmd_len = skb->len - 2; + cmd_buf = skb->data + 2; switch (cmd_id) { case C2H_DBG: @@ -2347,67 +2327,35 @@ void rtl_c2h_content_parsing(struct ieee80211_hw *hw, u8 cmd_id, } } -void rtl_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; - u8 *tmp_buf = NULL; - - c2h_cmd_id = buffer[0]; - c2h_cmd_seq = buffer[1]; - c2h_cmd_len = len - 2; - tmp_buf = buffer + 2; - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", - c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); - - RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - - switch (c2h_cmd_id) { - case C2H_BT_INFO: - case C2H_BT_MP: - rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); - break; - default: - rtl_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); - break; - } -} -EXPORT_SYMBOL_GPL(rtl_c2h_packet_handler); - void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct sk_buff *skb; unsigned long flags; - struct rtl_c2hcmd *c2hcmd; int i; for (i = 0; i < 200; i++) { /* dequeue a task */ spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); - c2hcmd = list_first_entry_or_null(&rtlpriv->c2hcmd_list, - struct rtl_c2hcmd, list); - - if (c2hcmd) - list_del(&c2hcmd->list); + skb = __skb_dequeue(&rtlpriv->c2hcmd_queue); spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); /* do it */ - if (!c2hcmd) + if (!skb) break; + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n", + *((u8 *)skb->cb)); + RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_DMESG, + "C2H data: ", skb->data, skb->len); + if (exec) - rtl_c2h_content_parsing(hw, c2hcmd->tag, - c2hcmd->len, c2hcmd->val); + rtl_c2h_content_parsing(hw, skb); /* free */ - kfree(c2hcmd->val); - - kfree(c2hcmd); + dev_kfree_skb_any(skb); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 3bf174e5b07e..912f205779c3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -161,10 +161,7 @@ void rtl_watchdog_wq_callback(void *data); void rtl_fwevt_wq_callback(void *data); void rtl_c2hcmd_wq_callback(void *data); void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec); -void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 
*val); -void rtl_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf); -void rtl_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); +void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb); u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index, enum wireless_mode wirelessmode); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index dd51c67c09fa..ae13bcfb3bf0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -831,8 +831,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) } /* handle command packet here */ if (stats.packet_report_type == C2H_PACKET) { - rtl_c2h_packet_handler(hw, skb->data, (u8)skb->len); - dev_kfree_skb_any(skb); + rtl_c2hcmd_enqueue(hw, skb); goto new_trx_end; } diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 930e1ec2280f..9e620b943f8c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2791,7 +2791,7 @@ struct rtl_priv { struct list_head entry_list; /* c2hcmd list for kthread level access */ - struct list_head c2hcmd_list; + struct sk_buff_head c2hcmd_queue; struct rtl_debug dbg; int max_fw_size; -- cgit v1.2.3 From 9644032e307022ecb1a436537cacedb91d569d98 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:30:06 +0800 Subject: rtlwifi: access skb->data to get C2H data by macro The format of C2H data is ID(1 byte) + Length(1 byte) + value, and it is more readable to use macros to access C2H data. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 8 ++++---- drivers/net/wireless/realtek/rtlwifi/wifi.h | 5 +++++ 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 61f12f86fcd4..a5939ddfa9cb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2288,10 +2288,10 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw, u8 cmd_id, cmd_seq, cmd_len; u8 *cmd_buf = NULL; - cmd_id = skb->data[0]; - cmd_seq = skb->data[1]; - cmd_len = skb->len - 2; - cmd_buf = skb->data + 2; + cmd_id = GET_C2H_CMD_ID(skb->data); + cmd_seq = GET_C2H_SEQ(skb->data); + cmd_len = skb->len - C2H_DATA_OFFSET; + cmd_buf = GET_C2H_DATA_PTR(skb->data); switch (cmd_id) { case C2H_DBG: diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 9e620b943f8c..0f3b98c5227f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -177,6 +177,11 @@ enum rtl_c2h_evt_v2 { C2H_V2_CCX_RPT = 0x0F, }; +#define GET_C2H_CMD_ID(c2h) ({u8 *__c2h = c2h; __c2h[0]; }) +#define GET_C2H_SEQ(c2h) ({u8 *__c2h = c2h; __c2h[1]; }) +#define C2H_DATA_OFFSET 2 +#define GET_C2H_DATA_PTR(c2h) ({u8 *__c2h = c2h; &__c2h[C2H_DATA_OFFSET]; }) + #define GET_TX_REPORT_SN_V1(c2h) (c2h[6]) #define GET_TX_REPORT_ST_V1(c2h) (c2h[0] & 0xC0) #define GET_TX_REPORT_RETRY_V1(c2h) (c2h[2] & 0x3F) -- cgit v1.2.3 From 0a9f8f0a1ba9688a9ff5f6b83c4cc1eecd2fc9f2 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 May 2018 17:30:07 +0800 Subject: rtlwifi: fix btmpinfo timeout while processing C2H_BT_INFO In former patch, I enqueu all C2H commands and processed by a workqueue. 
In case C2H_BT_INFO will issue a H2C command to set BT reg, and wait for a C2H ack. But it is totally impossible that C2H workqueue waits for a C2H command, so kernel log warn rtlwifi: :<0> btmpinfo wait (req_num=0) timeout Since the C2H ack command C2H_BT_MP can be safely processed in interrupt context, add a fast command path to deal with the command. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index a5939ddfa9cb..39c817eddd78 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2262,11 +2262,33 @@ void rtl_fwevt_wq_callback(void *data) rtlpriv->cfg->ops->c2h_command_handle(hw); } +static void rtl_c2h_content_parsing(struct ieee80211_hw *hw, + struct sk_buff *skb); + +static bool rtl_c2h_fast_cmd(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + u8 cmd_id = GET_C2H_CMD_ID(skb->data); + + switch (cmd_id) { + case C2H_BT_MP: + return true; + default: + break; + } + + return false; +} + void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned long flags; + if (rtl_c2h_fast_cmd(hw, skb)) { + rtl_c2h_content_parsing(hw, skb); + return; + } + /* enqueue */ spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); -- cgit v1.2.3 From 88027c8ff0a3f87d5d06d53ee25a41b90d8bccec Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 23 May 2018 18:34:45 +0800 Subject: atmel: Add missing call to pci_disable_device() add pci_disable_device in error handling while init_atmel_card failed. Signed-off-by: YueHaibing Signed-off-by: Kalle Valo --- drivers/net/wireless/atmel/atmel_pci.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/atmel/atmel_pci.c b/drivers/net/wireless/atmel/atmel_pci.c index bcf1f274a251..30df58a41a83 100644 --- a/drivers/net/wireless/atmel/atmel_pci.c +++ b/drivers/net/wireless/atmel/atmel_pci.c @@ -61,8 +61,10 @@ static int atmel_pci_probe(struct pci_dev *pdev, dev = init_atmel_card(pdev->irq, pdev->resource[1].start, ATMEL_FW_TYPE_506, &pdev->dev, NULL, NULL); - if (!dev) + if (!dev) { + pci_disable_device(pdev); return -ENODEV; + } pci_set_drvdata(pdev, dev); return 0; -- cgit v1.2.3 From 6e91d48371e79862ea2c05867aaebe4afe55a865 Mon Sep 17 00:00:00 2001 From: Eyal Reizer Date: Mon, 28 May 2018 11:36:42 +0300 Subject: wlcore: sdio: check for valid platform device data before suspend the wl pointer can be null In case only wlcore_sdio is probed while no WiLink module is successfully probed, as in the case of mounting a wl12xx module while using a device tree file configured with wl18xx related settings. In this case the system was crashing in wl1271_suspend() as platform device data is not set. Make sure wl the pointer is valid before using it. 
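The actual fix is the small hunk in the diff that follows; as a stripped-down, user-space rendering of the same guard (all names invented), the idea is simply to bail out of the suspend handler early when the private data was never attached, instead of dereferencing it.

#include <stdio.h>
#include <stddef.h>

struct demo_wl {
	int wow_enabled;
};

static int demo_suspend(struct demo_wl *wl)
{
	if (!wl) {
		fprintf(stderr, "no wilink module was probed\n");
		return 0;	/* nothing to suspend, not an error */
	}

	printf("suspending, wow_enabled=%d\n", wl->wow_enabled);
	return 0;
}

int main(void)
{
	struct demo_wl wl = { .wow_enabled = 1 };

	demo_suspend(&wl);	/* normal case: module probed */
	demo_suspend(NULL);	/* wlcore_sdio probed alone */
	return 0;
}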
Signed-off-by: Eyal Reizer Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/sdio.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 15d5ac126061..750bea3574ee 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -399,6 +399,11 @@ static int wl1271_suspend(struct device *dev) mmc_pm_flag_t sdio_flags; int ret = 0; + if (!wl) { + dev_err(dev, "no wilink module was probed\n"); + goto out; + } + dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n", wl->wow_enabled); -- cgit v1.2.3 From a53e8f3edaa0f28458f37dc5654e1912a8a00f26 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 26 May 2018 16:00:19 +0100 Subject: rsi: fix spelling mistake "Uknown" -> "Unknown" Trivial fix to spelling mistake in rsi_dbg message text Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index bfa7569c85bb..2ca7464b7fa3 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1103,7 +1103,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, break; default: - rsi_dbg(ERR_ZONE, "%s: Uknown AMPDU action\n", __func__); + rsi_dbg(ERR_ZONE, "%s: Unknown AMPDU action\n", __func__); break; } -- cgit v1.2.3 From 618fd1ed17d4cf193d85747c04a8b836b8fc107e Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Thu, 24 May 2018 16:36:28 +0530 Subject: mwifiex: avoid exporting mwifiex_send_cmd This is a follow-up patch for commit 21c5c83ce833 ("mwifiex: support sysfs initiated device coredump"). Let us avoid exporting mwifiex_send_cmd and instead use a utility function mwifiex_fw_dump_event to achive the work. Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 1 - drivers/net/wireless/marvell/mwifiex/main.h | 1 + drivers/net/wireless/marvell/mwifiex/usb.c | 5 ++--- drivers/net/wireless/marvell/mwifiex/util.c | 7 +++++++ 4 files changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 8be1e69657a7..9cfcdf6bec52 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -674,7 +674,6 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, return ret; } -EXPORT_SYMBOL_GPL(mwifiex_send_cmd); /* * This function queues a command to the command pending queue. 
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 8ae74ed78805..69ac0a22c28c 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1691,6 +1691,7 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter); void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); +void mwifiex_fw_dump_event(struct mwifiex_private *priv); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, int cmd_type, diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 7aa39878ce49..bc475b83bb15 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -658,9 +658,8 @@ static void mwifiex_usb_coredump(struct device *dev) struct usb_interface *intf = to_usb_interface(dev); struct usb_card_rec *card = usb_get_intfdata(intf); - mwifiex_send_cmd(mwifiex_get_priv(card->adapter, MWIFIEX_BSS_ROLE_ANY), - HostCmd_CMD_FW_DUMP_EVENT, HostCmd_ACT_GEN_SET, 0, - NULL, true); + mwifiex_fw_dump_event(mwifiex_get_priv(card->adapter, + MWIFIEX_BSS_ROLE_ANY)); } static struct usb_driver mwifiex_usb_driver = { diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 51ccf10f4413..6dd212898117 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -757,3 +757,10 @@ void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags) return skb; } EXPORT_SYMBOL_GPL(mwifiex_alloc_dma_align_buf); + +void mwifiex_fw_dump_event(struct mwifiex_private *priv) +{ + mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT, HostCmd_ACT_GEN_SET, + 0, NULL, true); +} +EXPORT_SYMBOL_GPL(mwifiex_fw_dump_event); -- cgit v1.2.3 From b817047ae70c0bd67b677b65d0d69d72cd6e9728 Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Thu, 24 May 2018 19:18:27 +0530 Subject: mwifiex: handle race during mwifiex_usb_disconnect Race condition is observed during rmmod of mwifiex_usb: 1. The rmmod thread will call mwifiex_usb_disconnect(), download SHUTDOWN command and do wait_event_interruptible_timeout(), waiting for response. 2. The main thread will handle the response and will do a wake_up_interruptible(), unblocking rmmod thread. 3. On getting unblocked, rmmod thread will make rx_cmd.urb = NULL in mwifiex_usb_free(). 4. The main thread will try to resubmit rx_cmd.urb in mwifiex_usb_submit_rx_urb(), which is NULL. To fix, wait for main thread to complete before calling mwifiex_usb_free(). 
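In other words, the ordering that the fix (shown in the diff below) enforces in mwifiex_usb_disconnect(), with comments added here for explanation:

	/* The command-response work queued on adapter->workqueue may still
	 * be running and may resubmit card->rx_cmd.urb (steps 2-4 above).
	 * flush_workqueue() blocks until every work item queued before
	 * this point has finished executing.
	 */
	if (adapter->workqueue)
		flush_workqueue(adapter->workqueue);

	/* Nothing can resubmit the URB any more, so freeing it (and
	 * setting rx_cmd.urb to NULL) is now safe.
	 */
	mwifiex_usb_free(card);

Note that flush_workqueue() only waits for work that is already queued; the fix relies on the SHUTDOWN handshake just above having quiesced the firmware so that no new command responses are queued afterwards.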
Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/usb.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index bc475b83bb15..6e3cf9817730 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -644,6 +644,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) MWIFIEX_FUNC_SHUTDOWN); } + if (adapter->workqueue) + flush_workqueue(adapter->workqueue); + mwifiex_usb_free(card); mwifiex_dbg(adapter, FATAL, -- cgit v1.2.3 From ae30bdaa4c2747e96c880d909b48484f41bfd51d Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Sat, 26 May 2018 02:09:22 +0530 Subject: mwifiex: skip sending GT_REKEY_OFFLOAD_CFG if firmware has no support If firmware does not support embedded supplicant, then it in turn will not support GT rekey offloading. If this is the case, then driver must not advertise WOWLAN flags related to GTK rekey and it must also skip sending the GT_REKEY_OFFLOAD_CFG command. Signed-off-by: Shrenik Shikhare Signed-off-by: Cathy Luo Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 18 +++++++++++++++++- drivers/net/wireless/marvell/mwifiex/fw.h | 1 + drivers/net/wireless/marvell/mwifiex/sta_event.c | 3 +++ 3 files changed, 21 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 54a2297010d2..d04a59903e82 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3555,6 +3555,9 @@ static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev, { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + if (!ISSUPP_FIRMWARE_SUPPLICANT(priv->adapter->fw_cap_info)) + return -EOPNOTSUPP; + return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG, HostCmd_ACT_GEN_SET, 0, data, true); } @@ -4190,6 +4193,16 @@ static const struct wiphy_wowlan_support mwifiex_wowlan_support = { .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS, }; + +static const struct wiphy_wowlan_support mwifiex_wowlan_support_no_gtk = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_NET_DETECT, + .n_patterns = MWIFIEX_MEF_MAX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, + .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, + .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS, +}; #endif static bool mwifiex_is_valid_alpha2(const char *alpha2) @@ -4308,7 +4321,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) WIPHY_FLAG_TDLS_EXTERNAL_SETUP; #ifdef CONFIG_PM - wiphy->wowlan = &mwifiex_wowlan_support; + if (ISSUPP_FIRMWARE_SUPPLICANT(priv->adapter->fw_cap_info)) + wiphy->wowlan = &mwifiex_wowlan_support; + else + wiphy->wowlan = &mwifiex_wowlan_support_no_gtk; #endif wiphy->coalesce = &mwifiex_coalesce_support; diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index c5dc518f768b..b73f99dc5a72 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -249,6 +249,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16)) #define ISSUPP_ADHOC_ENABLED(FwCapInfo) (FwCapInfo & BIT(25)) #define 
ISSUPP_RANDOM_MAC(FwCapInfo) (FwCapInfo & BIT(27)) +#define ISSUPP_FIRMWARE_SUPPLICANT(FwCapInfo) (FwCapInfo & BIT(21)) #define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \ diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 93dfb76cd8a6..03a6492662ca 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -241,6 +241,9 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code, if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); + if (!ISSUPP_FIRMWARE_SUPPLICANT(priv->adapter->fw_cap_info)) + return; + mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG, HostCmd_ACT_GEN_REMOVE, 0, NULL, false); } -- cgit v1.2.3 From 666cc438f36918fb4ed5cab37f0cfaf14586b85e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 25 May 2018 16:38:54 -0500 Subject: mwifiex: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 4 ++++ drivers/net/wireless/marvell/mwifiex/scan.c | 1 + 2 files changed, 5 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index d04a59903e82..4cc58d79d837 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1158,6 +1158,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as IBSS\n", dev->name); + /* fall through */ case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */ return 0; default: @@ -1188,6 +1189,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as STA\n", dev->name); + /* fall through */ case NL80211_IFTYPE_STATION: /* This shouldn't happen */ return 0; default: @@ -1210,6 +1212,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as AP\n", dev->name); + /* fall through */ case NL80211_IFTYPE_AP: /* This shouldn't happen */ return 0; default: @@ -1249,6 +1252,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as P2P\n", dev->name); + /* fall through */ case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: return 0; diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index d7ce7f75ae38..19df92b6668d 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1308,6 +1308,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, case WLAN_EID_CHANNEL_SWITCH: bss_entry->chan_sw_ie_present = true; + /* fall through */ case WLAN_EID_PWR_CAPABILITY: case WLAN_EID_TPC_REPORT: case WLAN_EID_QUIET: -- cgit v1.2.3 From 788f4e4cf063173c25e3f3ad81e7cee923686ab1 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 29 May 2018 09:42:08 +0800 Subject: mwifiex: increase log level for internal scan fail result Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- 
drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 4cc58d79d837..a67e2d66ac9d 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2262,7 +2262,7 @@ done: if (!bss) { if (is_scanning_required) { - mwifiex_dbg(priv->adapter, WARN, + mwifiex_dbg(priv->adapter, MSG, "assoc: requested bss not found in scan results\n"); break; } -- cgit v1.2.3 From db69f4e05bd5c83ea66b156fddd7cfc562092bbc Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 29 May 2018 09:42:09 +0800 Subject: mwifiex: reserve passive scan time for radar channel Active scan is not allowed on a radar channel; use a passive scan with more time instead. Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 19df92b6668d..895b806cdb03 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -482,7 +482,8 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, scan_chan_list[chan_idx].max_scan_time = cpu_to_le16((u16) user_scan_in-> chan_list[0].scan_time); - else if (ch->flags & IEEE80211_CHAN_NO_IR) + else if ((ch->flags & IEEE80211_CHAN_NO_IR) || + (ch->flags & IEEE80211_CHAN_RADAR)) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->passive_scan_time); else @@ -502,10 +503,12 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, scan_chan_list[chan_idx].chan_scan_mode_bitmap |= MWIFIEX_DISABLE_CHAN_FILT; - if (filtered_scan) { + if (filtered_scan && + !((ch->flags & IEEE80211_CHAN_NO_IR) || + (ch->flags & IEEE80211_CHAN_RADAR))) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->specific_scan_time); - } + chan_idx++; } -- cgit v1.2.3 From 06895b1627fe36f0bea49d4c9fd0e27f0f7498b1 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 24 May 2018 13:54:50 -0500 Subject: rtlwifi: remove duplicate code Remove and refactor some code in order to avoid having identical code for different branches. Notice that the logic has been there since 2014. Addresses-Coverity-ID: 1426199 ("Identical code for different branches") Signed-off-by: Gustavo A. R.
Silva Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtc8723b2ant.c | 23 ++++------------------ 1 file changed, 4 insertions(+), 19 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 279fe01bb55e..df3facc8e5a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -2876,25 +2876,10 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); /* sw mechanism */ - if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } - } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } - } + if (wifi_bw == BTC_WIFI_BW_HT40) + btc8723b2ant_sw_mechanism(btcoexist, true, true, false, false); + else + btc8723b2ant_sw_mechanism(btcoexist, false, true, false, false); } /* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ -- cgit v1.2.3 From 0513c083d1f460c30eae67867c0add557f7ca1b9 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Mon, 28 May 2018 12:15:44 +0300 Subject: iwlwifi: add csr configuration for 6300 devices Recently we have switched the csr addresses and values configuration from a single configuration to all devices to a per-device configuration. Doing that, the configuration for 6300 devices wasn't set. This missing definition introduced a kernel panic once trying to access the csr's. Add the missing 6300 csr configuration. While at it, add a checker that the csr values were indeed configured, and bail out more gracefully if not. 
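The probe-time check itself is two lines (the same ones as in the pcie/drv.c hunk below), annotated here:

	/* Bail out of probe instead of NULL-dereferencing cfg->csr later.
	 * WARN_ONCE() evaluates to its condition, so this both logs one
	 * backtrace pointing at the unconfigured cfg and fails the probe
	 * with -EINVAL.
	 */
	if (WARN_ONCE(!cfg->csr, "CSR addresses aren't configured\n"))
		return -EINVAL;

Any other cfg entry missed by the per-family CSR conversion will now refuse to probe instead of crashing, which is the "bail out more gracefully" mentioned above.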
Fixes: a8cbb46f831d ("iwlwifi: allow different csr flags for different device families") Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/cfg/6000.c | 1 + drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 3 +++ 2 files changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index 51cec0bb75fc..dbcec7ce7863 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -373,6 +373,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = { .eeprom_params = &iwl6000_eeprom_params, .ht_params = &iwl6000_ht_params, .led_mode = IWL_LED_BLINK, + .csr = &iwl_csr_v1, }; MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 959de2f8bb28..38234bda9017 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -836,6 +836,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct iwl_trans *iwl_trans; int ret; + if (WARN_ONCE(!cfg->csr, "CSR addresses aren't configured\n")) + return -EINVAL; + iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); if (IS_ERR(iwl_trans)) return PTR_ERR(iwl_trans); -- cgit v1.2.3 From 6fd1cfc0f0a5fec92cce8a5c3fbfd7fd82b82160 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:25 -0700 Subject: nfp: return -EOPNOTSUPP from .ndo_get_phys_port_name for VFs After recent change we started returning 0 from ndo_get_phys_port_name for VFs. The name parameter for ndo_get_phys_port_name is not initialized by the stack so this can lead to a crash. We should have kept returning -EOPNOTSUPP in the first place. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index eea11e881bf5..1f572896d1ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3286,11 +3286,12 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) if (nn->port) return nfp_port_get_phys_port_name(netdev, name, len); - if (!nn->dp.is_vf) { - n = snprintf(name, len, "%d", nn->id); - if (n >= len) - return -EINVAL; - } + if (nn->dp.is_vf) + return -EOPNOTSUPP; + + n = snprintf(name, len, "%d", nn->id); + if (n >= len) + return -EINVAL; return 0; } -- cgit v1.2.3 From ca14573272e8cedb947426654992ae4d7c20c985 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:26 -0700 Subject: nfp: prefix vNIC phys_port_name with 'n' Some drivers are using a bare number inside phys_port_name as VF id and OpenStack's regexps will pick it up. We can't use a bare number for your vNICs, prefix the names with 'n'. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 1f572896d1ee..75110c8d6a90 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3289,7 +3289,7 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) if (nn->dp.is_vf) return -EOPNOTSUPP; - n = snprintf(name, len, "%d", nn->id); + n = snprintf(name, len, "n%d", nn->id); if (n >= len) return -EINVAL; -- cgit v1.2.3 From 055ee0d69887af1d511246d745610bdf9d627e75 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:27 -0700 Subject: nfp: abm: enable advanced queuing on demand ABM NIC FW has a cut-through mode where the PCIe queuing is bypassed, thus working like our standard NIC FWs. Use this mode by default and only enable queuing in switchdev mode where users can configure it. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 13 +++++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.c | 11 +++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.h | 2 ++ drivers/net/ethernet/netronome/nfp/nfp_abi.h | 14 ++++++++++++++ 4 files changed, 40 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index e40f6f06417b..676d3afc9bdd 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -36,10 +36,23 @@ #include "../nfpcore/nfp_cpp.h" #include "../nfp_app.h" +#include "../nfp_abi.h" #include "../nfp_main.h" #include "../nfp_net.h" #include "main.h" +int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm) +{ + return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE, + NULL, 0, NULL, 0); +} + +int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm) +{ + return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_DISABLE, + NULL, 0, NULL, 0); +} + void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink) { alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ); diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 5a12bb20bced..28a18ac62040 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -182,6 +182,7 @@ static enum devlink_eswitch_mode nfp_abm_eswitch_mode_get(struct nfp_app *app) static int nfp_abm_eswitch_set_legacy(struct nfp_abm *abm) { nfp_abm_kill_reprs_all(abm); + nfp_abm_ctrl_qm_disable(abm); abm->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; return 0; @@ -200,6 +201,10 @@ static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm) struct nfp_net *nn; int err; + err = nfp_abm_ctrl_qm_enable(abm); + if (err) + return err; + list_for_each_entry(nn, &pf->vnics, vnic_list) { struct nfp_abm_link *alink = nn->app_priv; @@ -217,6 +222,7 @@ static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm) err_kill_all_reprs: nfp_abm_kill_reprs_all(abm); + nfp_abm_ctrl_qm_disable(abm); return err; } @@ -350,6 +356,11 @@ static int nfp_abm_init(struct nfp_app *app) if (err) goto err_free_abm; + /* We start in legacy mode, make sure advanced queuing is disabled */ + err = nfp_abm_ctrl_qm_disable(abm); + if (err) + goto err_free_abm; + err = -ENOMEM; reprs = 
nfp_reprs_alloc(pf->max_data_vnics); if (!reprs) diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 5938b69b8a84..7d129b205535 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -72,4 +72,6 @@ struct nfp_abm_link { void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm); +int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm); +int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm); #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h index 7ffa6e6a9d1c..8b56c27931bf 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_abi.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h @@ -59,12 +59,26 @@ * @NFP_MBOX_POOL_SET: set shared buffer pool info/config * Input - struct nfp_shared_buf_pool_info_set * Output - None + * + * @NFP_MBOX_PCIE_ABM_ENABLE: enable PCIe-side advanced buffer management + * Enable advanced buffer management of the PCIe block. If ABM is disabled + * PCIe block maintains a very short queue of buffers and does tail drop. + * ABM allows more advanced buffering and priority control. + * Input - None + * Output - None + * + * @NFP_MBOX_PCIE_ABM_DISABLE: disable PCIe-side advanced buffer management + * Input - None + * Output - None */ enum nfp_mbox_cmd { NFP_MBOX_NO_CMD = 0x00, NFP_MBOX_POOL_GET = 0x01, NFP_MBOX_POOL_SET = 0x02, + + NFP_MBOX_PCIE_ABM_ENABLE = 0x03, + NFP_MBOX_PCIE_ABM_DISABLE = 0x04, }; #define NFP_SHARED_BUF_COUNT_SYM_NAME "_abi_nfd_pf%u_sb_cnt" -- cgit v1.2.3 From 25e0036fcd241fcb4541522e511168871d7c8bed Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:28 -0700 Subject: nfp: abm: add helpers for configuring queue marking levels Queue levels for simple ECN marking are stored in _abi_nfd_out_q_lvls_X symbol, where X is the PCIe PF id. Find out the location of that symbol and add helpers for modifying it. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 80 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.h | 3 + .../net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 5 ++ 3 files changed, 88 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index 676d3afc9bdd..978884a0be19 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -35,12 +35,57 @@ #include #include "../nfpcore/nfp_cpp.h" +#include "../nfpcore/nfp_nffw.h" #include "../nfp_app.h" #include "../nfp_abi.h" #include "../nfp_main.h" #include "../nfp_net.h" #include "main.h" +#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u" +#define NFP_QLVL_STRIDE 16 +#define NFP_QLVL_THRS 8 + +static unsigned long long +nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue) +{ + return alink->abm->q_lvls->addr + + (alink->queue_base + queue) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; +} + +static int +nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + u32 muw; + int err; + + muw = NFP_CPP_ATOMIC_WR(alink->abm->q_lvls->target, + alink->abm->q_lvls->domain); + + err = nfp_cpp_writel(cpp, muw, nfp_abm_q_lvl_thrs(alink, i), val); + if (err) { + nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n", + alink->id, i); + return err; + } + + return 0; +} + +int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val) +{ + int i, err; + + for (i = 0; i < alink->vnic->max_rx_rings; i++) { + err = nfp_abm_ctrl_set_q_lvl(alink, i, val); + if (err) + return err; + } + + return 0; +} + int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm) { return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE, @@ -59,13 +104,48 @@ void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink) alink->queue_base /= alink->vnic->stride_rx; } +static const struct nfp_rtsym * +nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size) +{ + const struct nfp_rtsym *sym; + + sym = nfp_rtsym_lookup(pf->rtbl, name); + if (!sym) { + nfp_err(pf->cpp, "Symbol '%s' not found\n", name); + return ERR_PTR(-ENOENT); + } + if (sym->size != size) { + nfp_err(pf->cpp, + "Symbol '%s' wrong size: expected %u got %llu\n", + name, size, sym->size); + return ERR_PTR(-EINVAL); + } + + return sym; +} + +static const struct nfp_rtsym * +nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name, + unsigned int size) +{ + return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS); +} + int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm) { struct nfp_pf *pf = abm->app->pf; + const struct nfp_rtsym *sym; unsigned int pf_id; + char pf_symbol[64]; pf_id = nfp_cppcore_pcie_unit(pf->cpp); abm->pf_id = pf_id; + snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id); + sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE); + if (IS_ERR(sym)) + return PTR_ERR(sym); + abm->q_lvls = sym; + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 7d129b205535..1ac651cdc140 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -49,11 +49,13 @@ struct nfp_net; * @pf_id: ID of our PF link * @eswitch_mode: devlink eswitch mode, advanced functions only visible * in switchdev mode + * @q_lvls: queue level control area */ struct nfp_abm { struct nfp_app *app; unsigned int 
pf_id; enum devlink_eswitch_mode eswitch_mode; + const struct nfp_rtsym *q_lvls; }; /** @@ -72,6 +74,7 @@ struct nfp_abm_link { void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm); +int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val); int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm); int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm); #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 4e19add1c539..b0da3d436850 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -87,6 +87,11 @@ struct resource; #define NFP_CPP_TARGET_ID_MASK 0x1f +#define NFP_CPP_ATOMIC_RD(target, island) \ + NFP_CPP_ISLAND_ID((target), 3, 0, (island)) +#define NFP_CPP_ATOMIC_WR(target, island) \ + NFP_CPP_ISLAND_ID((target), 4, 0, (island)) + /** * NFP_CPP_ID() - pack target, token, and action into a CPP ID. * @target: NFP CPP target id -- cgit v1.2.3 From 8c8e6406f593ce21694ce7da6d86af49b4610e69 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:29 -0700 Subject: nfp: abm: add simple RED offload Offload simple RED configurations. For now support only DCTCP like scenarios where min and max are the same. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/abm/main.c | 82 +++++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.h | 10 ++++ 2 files changed, 92 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 28a18ac62040..22251d88c958 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -38,6 +38,8 @@ #include #include #include +#include +#include #include "../nfpcore/nfp.h" #include "../nfpcore/nfp_cpp.h" @@ -55,6 +57,84 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id) FIELD_PREP(NFP_ABM_PORTID_ID, id); } +static void +nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + + if (handle != alink->qdiscs[0].handle) + return; + + alink->qdiscs[0].handle = TC_H_UNSPEC; + port->tc_offload_cnt = 0; + nfp_abm_ctrl_set_all_q_lvls(alink, ~0); +} + +static int +nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + int err; + + if (opt->set.min != opt->set.max || !opt->set.is_ecn) { + nfp_warn(alink->abm->app->cpp, + "RED offload failed - unsupported parameters\n"); + err = -EINVAL; + goto err_destroy; + } + err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min); + if (err) + goto err_destroy; + + alink->qdiscs[0].handle = opt->handle; + port->tc_offload_cnt = 1; + + return 0; +err_destroy: + if (alink->qdiscs[0].handle != TC_H_UNSPEC) + nfp_abm_red_destroy(netdev, alink, alink->qdiscs[0].handle); + return err; +} + +static int +nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + if (opt->parent != TC_H_ROOT) + return -EOPNOTSUPP; + + switch (opt->command) { + case TC_RED_REPLACE: + return nfp_abm_red_replace(netdev, alink, opt); + case TC_RED_DESTROY: + nfp_abm_red_destroy(netdev, alink, opt->handle); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int 
+nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev, + enum tc_setup_type type, void *type_data) +{ + struct nfp_repr *repr = netdev_priv(netdev); + struct nfp_port *port; + + port = nfp_port_from_netdev(netdev); + if (!port || port->type != NFP_PORT_PF_PORT) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_QDISC_RED: + return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data); + default: + return -EOPNOTSUPP; + } +} + static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id) { enum nfp_repr_type rtype; @@ -403,6 +483,8 @@ const struct nfp_app_type app_abm = { .vnic_alloc = nfp_abm_vnic_alloc, .vnic_free = nfp_abm_vnic_free, + .setup_tc = nfp_abm_setup_tc, + .eswitch_mode_get = nfp_abm_eswitch_mode_get, .eswitch_mode_set = nfp_abm_eswitch_mode_set, diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 1ac651cdc140..979f98fb808b 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -58,18 +58,28 @@ struct nfp_abm { const struct nfp_rtsym *q_lvls; }; +/** + * struct nfp_red_qdisc - representation of single RED Qdisc + * @handle: handle of currently offloaded RED Qdisc + */ +struct nfp_red_qdisc { + u32 handle; +}; + /** * struct nfp_abm_link - port tuple of a ABM NIC * @abm: back pointer to nfp_abm * @vnic: data vNIC * @id: id of the data vNIC * @queue_base: id of base to host queue within PCIe (not QC idx) + * @qdiscs: array of qdiscs */ struct nfp_abm_link { struct nfp_abm *abm; struct nfp_net *vnic; unsigned int id; unsigned int queue_base; + struct nfp_red_qdisc qdiscs[1]; }; void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); -- cgit v1.2.3 From cb89cac8e705b0405872a870842c6c2a0a0e5ec2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:31 -0700 Subject: nfp: abm: report statistics from RED offload Report basic and extended RED statistics back to TC. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 114 ++++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.c | 92 +++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.h | 38 +++++++++ 3 files changed, 244 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index 978884a0be19..d2d9ca7a727c 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -44,8 +44,15 @@ #define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u" #define NFP_QLVL_STRIDE 16 +#define NFP_QLVL_BLOG_BYTES 0 +#define NFP_QLVL_BLOG_PKTS 4 #define NFP_QLVL_THRS 8 +#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats" +#define NFP_QMSTAT_STRIDE 32 +#define NFP_QMSTAT_DROP 16 +#define NFP_QMSTAT_ECN 24 + static unsigned long long nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue) { @@ -53,6 +60,55 @@ nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue) (alink->queue_base + queue) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; } +static int +nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, + unsigned int stride, unsigned int offset, unsigned int i, + bool is_u64, u64 *res) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + u32 val32, mur; + u64 val, addr; + int err; + + mur = NFP_CPP_ATOMIC_RD(sym->target, sym->domain); + + addr = sym->addr + (alink->queue_base + i) * stride + offset; + if (is_u64) + err = nfp_cpp_readq(cpp, mur, addr, &val); + else + err = nfp_cpp_readl(cpp, mur, addr, &val32); + if (err) { + nfp_err(cpp, + "RED offload reading stat failed on vNIC %d queue %d\n", + alink->id, i); + return err; + } + + *res = is_u64 ? val : val32; + return 0; +} + +static int +nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, + unsigned int stride, unsigned int offset, bool is_u64, + u64 *res) +{ + u64 val, sum = 0; + unsigned int i; + int err; + + for (i = 0; i < alink->vnic->max_rx_rings; i++) { + err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i, + is_u64, &val); + if (err) + return err; + sum += val; + } + + *res = sum; + return 0; +} + static int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) { @@ -86,6 +142,58 @@ int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val) return 0; } +int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, + struct nfp_alink_stats *stats) +{ + u64 pkts = 0, bytes = 0; + int i, err; + + for (i = 0; i < alink->vnic->max_rx_rings; i++) { + pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i)); + bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8); + } + stats->tx_pkts = pkts; + stats->tx_bytes = bytes; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES, + false, &stats->backlog_bytes); + if (err) + return err; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS, + false, &stats->backlog_pkts); + if (err) + return err; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + true, &stats->drops); + if (err) + return err; + + return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + true, &stats->overlimits); +} + +int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, + struct nfp_alink_xstats *xstats) +{ + int err; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + 
true, &xstats->pdrop); + if (err) + return err; + + return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + true, &xstats->ecn_marked); +} + int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm) { return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE, @@ -147,5 +255,11 @@ int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm) return PTR_ERR(sym); abm->q_lvls = sym; + snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id); + sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE); + if (IS_ERR(sym)) + return PTR_ERR(sym); + abm->qm_stats = sym; + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 22251d88c958..d0c21899a8b7 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "../nfpcore/nfp.h" #include "../nfpcore/nfp_cpp.h" @@ -57,6 +58,23 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id) FIELD_PREP(NFP_ABM_PORTID_ID, id); } +static int nfp_abm_reset_stats(struct nfp_abm_link *alink) +{ + int err; + + err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[0].stats); + if (err) + return err; + alink->qdiscs[0].stats.backlog_pkts = 0; + alink->qdiscs[0].stats.backlog_bytes = 0; + + err = nfp_abm_ctrl_read_xstats(alink, &alink->qdiscs[0].xstats); + if (err) + return err; + + return 0; +} + static void nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink, u32 handle) @@ -88,16 +106,86 @@ nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, if (err) goto err_destroy; + /* Reset stats only on new qdisc */ + if (alink->qdiscs[0].handle != opt->handle) { + err = nfp_abm_reset_stats(alink); + if (err) + goto err_destroy; + } + alink->qdiscs[0].handle = opt->handle; port->tc_offload_cnt = 1; return 0; err_destroy: + /* If the qdisc keeps on living, but we can't offload undo changes */ + if (alink->qdiscs[0].handle == opt->handle) { + opt->set.qstats->qlen -= alink->qdiscs[0].stats.backlog_pkts; + opt->set.qstats->backlog -= + alink->qdiscs[0].stats.backlog_bytes; + } if (alink->qdiscs[0].handle != TC_H_UNSPEC) nfp_abm_red_destroy(netdev, alink, alink->qdiscs[0].handle); return err; } +static void +nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old, + struct tc_qopt_offload_stats *stats) +{ + _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes, + new->tx_pkts - old->tx_pkts); + stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts; + stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes; + stats->qstats->overlimits += new->overlimits - old->overlimits; + stats->qstats->drops += new->drops - old->drops; +} + +static int +nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + struct nfp_alink_stats *prev_stats; + struct nfp_alink_stats stats; + int err; + + if (alink->qdiscs[0].handle != opt->handle) + return -EOPNOTSUPP; + prev_stats = &alink->qdiscs[0].stats; + + err = nfp_abm_ctrl_read_stats(alink, &stats); + if (err) + return err; + + nfp_abm_update_stats(&stats, prev_stats, &opt->stats); + + *prev_stats = stats; + + return 0; +} + +static int +nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + struct nfp_alink_xstats *prev_xstats; + struct nfp_alink_xstats xstats; + int err; + + if (alink->qdiscs[0].handle != opt->handle) + return -EOPNOTSUPP; + prev_xstats = 
&alink->qdiscs[0].xstats; + + err = nfp_abm_ctrl_read_xstats(alink, &xstats); + if (err) + return err; + + opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked; + opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop; + + *prev_xstats = xstats; + + return 0; +} + static int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) @@ -111,6 +199,10 @@ nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, case TC_RED_DESTROY: nfp_abm_red_destroy(netdev, alink, opt->handle); return 0; + case TC_RED_STATS: + return nfp_abm_red_stats(alink, opt); + case TC_RED_XSTATS: + return nfp_abm_red_xstats(alink, opt); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 979f98fb808b..93a3b79cf468 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -50,20 +50,54 @@ struct nfp_net; * @eswitch_mode: devlink eswitch mode, advanced functions only visible * in switchdev mode * @q_lvls: queue level control area + * @qm_stats: queue statistics symbol */ struct nfp_abm { struct nfp_app *app; unsigned int pf_id; enum devlink_eswitch_mode eswitch_mode; const struct nfp_rtsym *q_lvls; + const struct nfp_rtsym *qm_stats; +}; + +/** + * struct nfp_alink_stats - ABM NIC statistics + * @tx_pkts: number of TXed packets + * @tx_bytes: number of TXed bytes + * @backlog_pkts: momentary backlog length (packets) + * @backlog_bytes: momentary backlog length (bytes) + * @overlimits: number of ECN marked TXed packets (accumulative) + * @drops: number of tail-dropped packets (accumulative) + */ +struct nfp_alink_stats { + u64 tx_pkts; + u64 tx_bytes; + u64 backlog_pkts; + u64 backlog_bytes; + u64 overlimits; + u64 drops; +}; + +/** + * struct nfp_alink_xstats - extended ABM NIC statistics + * @ecn_marked: number of ECN marked TXed packets + * @pdrop: number of hard drops due to queue limit + */ +struct nfp_alink_xstats { + u64 ecn_marked; + u64 pdrop; }; /** * struct nfp_red_qdisc - representation of single RED Qdisc * @handle: handle of currently offloaded RED Qdisc + * @stats: statistics from last refresh + * @xstats: base of extended statistics */ struct nfp_red_qdisc { u32 handle; + struct nfp_alink_stats stats; + struct nfp_alink_xstats xstats; }; /** @@ -85,6 +119,10 @@ struct nfp_abm_link { void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm); int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val); +int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, + struct nfp_alink_stats *stats); +int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, + struct nfp_alink_xstats *xstats); int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm); int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm); #endif -- cgit v1.2.3 From 21f31bc02938dc4d5aa5803d03f56e8681a30977 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:32 -0700 Subject: nfp: allow apps to add extra stats to ports Allow nfp apps to add extra ethtool stats. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_app.c | 22 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_app.h | 13 +++++++++++++ .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 10 ++++++++-- drivers/net/ethernet/netronome/nfp/nfp_port.h | 2 ++ 4 files changed, 45 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index c9d8a7ab311e..f28b244f4ee7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -43,6 +43,7 @@ #include "nfp_main.h" #include "nfp_net.h" #include "nfp_net_repr.h" +#include "nfp_port.h" static const struct nfp_app_type *apps[] = { [NFP_APP_CORE_NIC] = &app_nic, @@ -85,6 +86,27 @@ const char *nfp_app_mip_name(struct nfp_app *app) return nfp_mip_name(app->pf->mip); } +u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data) +{ + if (!port || !port->app || !port->app->type->port_get_stats) + return data; + return port->app->type->port_get_stats(port->app, port, data); +} + +int nfp_app_port_get_stats_count(struct nfp_port *port) +{ + if (!port || !port->app || !port->app->type->port_get_stats_count) + return 0; + return port->app->type->port_get_stats_count(port->app, port); +} + +u8 *nfp_app_port_get_stats_strings(struct nfp_port *port, u8 *data) +{ + if (!port || !port->app || !port->app->type->port_get_stats_strings) + return data; + return port->app->type->port_get_stats_strings(port->app, port, data); +} + struct sk_buff * nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 23b99a4e05c2..ee74caacb015 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -90,6 +90,9 @@ extern const struct nfp_app_type app_abm; * @repr_stop: representor netdev stop callback * @check_mtu: MTU change request on a netdev (verify it is valid) * @repr_change_mtu: MTU change request on repr (make and verify change) + * @port_get_stats: get extra ethtool statistics for a port + * @port_get_stats_count: get count of extra statistics for a port + * @port_get_stats_strings: get strings for extra statistics * @start: start application logic * @stop: stop application logic * @ctrl_msg_rx: control message handler @@ -132,6 +135,12 @@ struct nfp_app_type { int (*repr_change_mtu)(struct nfp_app *app, struct net_device *netdev, int new_mtu); + u64 *(*port_get_stats)(struct nfp_app *app, + struct nfp_port *port, u64 *data); + int (*port_get_stats_count)(struct nfp_app *app, struct nfp_port *port); + u8 *(*port_get_stats_strings)(struct nfp_app *app, + struct nfp_port *port, u8 *data); + int (*start)(struct nfp_app *app); void (*stop)(struct nfp_app *app); @@ -404,6 +413,10 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id) struct nfp_app *nfp_app_from_netdev(struct net_device *netdev); +u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data); +int nfp_app_port_get_stats_count(struct nfp_port *port); +u8 *nfp_app_port_get_stats_strings(struct nfp_port *port, u8 *data); + struct nfp_reprs * nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type); struct nfp_reprs * diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index c9016419bfa0..26d1cc4e2906 100644 --- 
a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -437,7 +437,7 @@ static int nfp_net_set_ringparam(struct net_device *netdev, return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } -static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) +__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) { va_list args; @@ -637,6 +637,7 @@ static void nfp_net_get_strings(struct net_device *netdev, nn->dp.num_tx_rings, false); data = nfp_mac_get_stats_strings(netdev, data); + data = nfp_app_port_get_stats_strings(nn->port, data); break; } } @@ -651,6 +652,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->dp.num_rx_rings, nn->dp.num_tx_rings); data = nfp_mac_get_stats(netdev, data); + data = nfp_app_port_get_stats(nn->port, data); } static int nfp_net_get_sset_count(struct net_device *netdev, int sset) @@ -662,7 +664,8 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) return nfp_vnic_get_sw_stats_count(netdev) + nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, nn->dp.num_tx_rings) + - nfp_mac_get_stats_count(netdev); + nfp_mac_get_stats_count(netdev) + + nfp_app_port_get_stats_count(nn->port); default: return -EOPNOTSUPP; } @@ -679,6 +682,7 @@ static void nfp_port_get_strings(struct net_device *netdev, data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true); else data = nfp_mac_get_stats_strings(netdev, data); + data = nfp_app_port_get_stats_strings(port, data); break; } } @@ -693,6 +697,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats, data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0); else data = nfp_mac_get_stats(netdev, data); + data = nfp_app_port_get_stats(port, data); } static int nfp_port_get_sset_count(struct net_device *netdev, int sset) @@ -706,6 +711,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset) count = nfp_vnic_get_hw_stats_count(0, 0); else count = nfp_mac_get_stats_count(netdev); + count += nfp_app_port_get_stats_count(port); return count; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 18666750456e..51f10ae2d53e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -122,6 +122,8 @@ struct nfp_port { extern const struct ethtool_ops nfp_port_ethtool_ops; extern const struct switchdev_ops nfp_port_switchdev_ops; +__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...); + int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); -- cgit v1.2.3 From 0a8b7019bbcb219ef941f877650f9c09fa331eef Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:33 -0700 Subject: nfp: abm: expose the internal stats in ethtool There is a handful of statistics exposing some internal details of the implementation. Expose those via ethtool. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 22 ++++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.c | 51 +++++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/abm/main.h | 2 ++ 3 files changed, 75 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index d2d9ca7a727c..79fc9147c012 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -50,6 +50,8 @@ #define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats" #define NFP_QMSTAT_STRIDE 32 +#define NFP_QMSTAT_NON_STO 0 +#define NFP_QMSTAT_STO 8 #define NFP_QMSTAT_DROP 16 #define NFP_QMSTAT_ECN 24 @@ -142,6 +144,26 @@ int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val) return 0; } +u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i) +{ + u64 val; + + if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, + NFP_QMSTAT_NON_STO, i, true, &val)) + return 0; + return val; +} + +u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i) +{ + u64 val; + + if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, + NFP_QMSTAT_STO, i, true, &val)) + return 0; + return val; +} + int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, struct nfp_alink_stats *stats) { diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index d0c21899a8b7..4e89159f13d3 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -497,6 +497,53 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn) kfree(alink); } +static u64 * +nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data) +{ + struct nfp_repr *repr = netdev_priv(port->netdev); + struct nfp_abm_link *alink; + unsigned int i; + + if (port->type != NFP_PORT_PF_PORT) + return data; + alink = repr->app_priv; + for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) { + *data++ = nfp_abm_ctrl_stat_non_sto(alink, i); + *data++ = nfp_abm_ctrl_stat_sto(alink, i); + } + return data; +} + +static int +nfp_abm_port_get_stats_count(struct nfp_app *app, struct nfp_port *port) +{ + struct nfp_repr *repr = netdev_priv(port->netdev); + struct nfp_abm_link *alink; + + if (port->type != NFP_PORT_PF_PORT) + return 0; + alink = repr->app_priv; + return alink->vnic->dp.num_r_vecs * 2; +} + +static u8 * +nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port, + u8 *data) +{ + struct nfp_repr *repr = netdev_priv(port->netdev); + struct nfp_abm_link *alink; + unsigned int i; + + if (port->type != NFP_PORT_PF_PORT) + return data; + alink = repr->app_priv; + for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) { + data = nfp_pr_et(data, "q%u_no_wait", i); + data = nfp_pr_et(data, "q%u_delayed", i); + } + return data; +} + static int nfp_abm_init(struct nfp_app *app) { struct nfp_pf *pf = app->pf; @@ -575,6 +622,10 @@ const struct nfp_app_type app_abm = { .vnic_alloc = nfp_abm_vnic_alloc, .vnic_free = nfp_abm_vnic_free, + .port_get_stats = nfp_abm_port_get_stats, + .port_get_stats_count = nfp_abm_port_get_stats_count, + .port_get_stats_strings = nfp_abm_port_get_stats_strings, + .setup_tc = nfp_abm_setup_tc, .eswitch_mode_get = nfp_abm_eswitch_mode_get, diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 93a3b79cf468..09fd15847961 100644 --- 
a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -123,6 +123,8 @@ int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, struct nfp_alink_stats *stats); int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, struct nfp_alink_xstats *xstats); +u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i); +u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i); int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm); int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm); #endif -- cgit v1.2.3 From 2ef3c253f15a7784ce541417d5663d2d1e751231 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:34 -0700 Subject: nfp: abm: expose all PF queues Allocate the PF representor as multi-queue to allow setting the configuration per-queue. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/abm/main.c | 10 +++++++--- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 5 +++-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 7 ++++++- 3 files changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 4e89159f13d3..ef77d7b0d99d 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -255,14 +255,18 @@ nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink, struct nfp_reprs *reprs; struct nfp_repr *repr; struct nfp_port *port; + unsigned int txqs; int err; - if (ptype == NFP_PORT_PHYS_PORT) + if (ptype == NFP_PORT_PHYS_PORT) { rtype = NFP_REPR_TYPE_PHYS_PORT; - else + txqs = 1; + } else { rtype = NFP_REPR_TYPE_PF; + txqs = alink->vnic->max_rx_rings; + } - netdev = nfp_repr_alloc(app); + netdev = nfp_repr_alloc_mqs(app, txqs, 1); if (!netdev) return -ENOMEM; repr = netdev_priv(netdev); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 117eca6819de..d7b712f6362f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -360,12 +360,13 @@ void nfp_repr_free(struct net_device *netdev) __nfp_repr_free(netdev_priv(netdev)); } -struct net_device *nfp_repr_alloc(struct nfp_app *app) +struct net_device * +nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs) { struct net_device *netdev; struct nfp_repr *repr; - netdev = alloc_etherdev(sizeof(*repr)); + netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs); if (!netdev) return NULL; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 8366e4f3c623..1bf2b18109ab 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -126,7 +126,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 cmsg_port_id, struct nfp_port *port, struct net_device *pf_netdev); void nfp_repr_free(struct net_device *netdev); -struct net_device *nfp_repr_alloc(struct nfp_app *app); +struct net_device * +nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs); void nfp_repr_clean_and_free(struct nfp_repr *repr); void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs); void nfp_reprs_clean_and_free_by_type(struct nfp_app *app, @@ -134,4 +135,8 @@ void nfp_reprs_clean_and_free_by_type(struct nfp_app *app, struct nfp_reprs 
*nfp_reprs_alloc(unsigned int num_reprs); int nfp_reprs_resync_phys_ports(struct nfp_app *app); +static inline struct net_device *nfp_repr_alloc(struct nfp_app *app) +{ + return nfp_repr_alloc_mqs(app, 1, 1); +} #endif /* NFP_NET_REPR_H */ -- cgit v1.2.3 From 674cb229b61039c4763838496e9241f4e6f145a8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:36 -0700 Subject: nfp: abm: multi-queue RED offload Add support for MQ offload and setting RED parameters on queue-by-queue basis. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 50 ++++++- drivers/net/ethernet/netronome/nfp/abm/main.c | 192 ++++++++++++++++++++------ drivers/net/ethernet/netronome/nfp/abm/main.h | 14 +- 3 files changed, 208 insertions(+), 48 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index 79fc9147c012..b157ccd8c80f 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -111,8 +111,7 @@ nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, return 0; } -static int -nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) +int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) { struct nfp_cpp *cpp = alink->abm->app->cpp; u32 muw; @@ -164,6 +163,37 @@ u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i) return val; } +int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_stats *stats) +{ + int err; + + stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i)); + stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8); + + err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES, + i, false, &stats->backlog_bytes); + if (err) + return err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS, + i, false, &stats->backlog_pkts); + if (err) + return err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + i, true, &stats->drops); + if (err) + return err; + + return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + i, true, &stats->overlimits); +} + int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, struct nfp_alink_stats *stats) { @@ -200,6 +230,22 @@ int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, true, &stats->overlimits); } +int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_xstats *xstats) +{ + int err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + i, true, &xstats->pdrop); + if (err) + return err; + + return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + i, true, &xstats->ecn_marked); +} + int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, struct nfp_alink_xstats *xstats) { diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index ef77d7b0d99d..21d5af1fb061 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -58,43 +58,77 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id) FIELD_PREP(NFP_ABM_PORTID_ID, id); } -static int nfp_abm_reset_stats(struct nfp_abm_link *alink) 
+static int +__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle, unsigned int qs, u32 init_val) { - int err; + struct nfp_port *port = nfp_port_from_netdev(netdev); + int ret; - err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[0].stats); - if (err) - return err; - alink->qdiscs[0].stats.backlog_pkts = 0; - alink->qdiscs[0].stats.backlog_bytes = 0; + ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val); + memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs); - err = nfp_abm_ctrl_read_xstats(alink, &alink->qdiscs[0].xstats); - if (err) - return err; + alink->parent = handle; + alink->num_qdiscs = qs; + port->tc_offload_cnt = qs; - return 0; + return ret; +} + +static void +nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle, unsigned int qs) +{ + __nfp_abm_reset_root(netdev, alink, handle, qs, ~0); +} + +static int +nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + unsigned int i = TC_H_MIN(opt->parent) - 1; + + if (opt->parent == TC_H_ROOT) + i = 0; + else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) + i = TC_H_MIN(opt->parent) - 1; + else + return -EOPNOTSUPP; + + if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle) + return -EOPNOTSUPP; + + return i; } static void nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink, u32 handle) { - struct nfp_port *port = nfp_port_from_netdev(netdev); + unsigned int i; - if (handle != alink->qdiscs[0].handle) + for (i = 0; i < alink->num_qdiscs; i++) + if (handle == alink->qdiscs[i].handle) + break; + if (i == alink->num_qdiscs) return; - alink->qdiscs[0].handle = TC_H_UNSPEC; - port->tc_offload_cnt = 0; - nfp_abm_ctrl_set_all_q_lvls(alink, ~0); + if (alink->parent == TC_H_ROOT) { + nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); + } else { + nfp_abm_ctrl_set_q_lvl(alink, i, ~0); + memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs)); + } } static int nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) { - struct nfp_port *port = nfp_port_from_netdev(netdev); - int err; + bool existing; + int i, err; + + i = nfp_abm_red_find(alink, opt); + existing = i >= 0; if (opt->set.min != opt->set.max || !opt->set.is_ecn) { nfp_warn(alink->abm->app->cpp, @@ -102,30 +136,62 @@ nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, err = -EINVAL; goto err_destroy; } - err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min); - if (err) - goto err_destroy; - /* Reset stats only on new qdisc */ - if (alink->qdiscs[0].handle != opt->handle) { - err = nfp_abm_reset_stats(alink); + if (existing) { + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min); + else + err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); if (err) goto err_destroy; + return 0; } - alink->qdiscs[0].handle = opt->handle; - port->tc_offload_cnt = 1; + if (opt->parent == TC_H_ROOT) { + i = 0; + err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1, + opt->set.min); + } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) { + i = TC_H_MIN(opt->parent) - 1; + err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); + } else { + return -EINVAL; + } + /* Set the handle to try full clean up, in case IO failed */ + alink->qdiscs[i].handle = opt->handle; + if (err) + goto err_destroy; + + if (opt->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats); + else + err = 
nfp_abm_ctrl_read_q_stats(alink, i, + &alink->qdiscs[i].stats); + if (err) + goto err_destroy; + + if (opt->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_xstats(alink, + &alink->qdiscs[i].xstats); + else + err = nfp_abm_ctrl_read_q_xstats(alink, i, + &alink->qdiscs[i].xstats); + if (err) + goto err_destroy; + + alink->qdiscs[i].stats.backlog_pkts = 0; + alink->qdiscs[i].stats.backlog_bytes = 0; return 0; err_destroy: /* If the qdisc keeps on living, but we can't offload undo changes */ - if (alink->qdiscs[0].handle == opt->handle) { - opt->set.qstats->qlen -= alink->qdiscs[0].stats.backlog_pkts; + if (existing) { + opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts; opt->set.qstats->backlog -= - alink->qdiscs[0].stats.backlog_bytes; + alink->qdiscs[i].stats.backlog_bytes; } - if (alink->qdiscs[0].handle != TC_H_UNSPEC) - nfp_abm_red_destroy(netdev, alink, alink->qdiscs[0].handle); + nfp_abm_red_destroy(netdev, alink, opt->handle); + return err; } @@ -146,13 +212,17 @@ nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) { struct nfp_alink_stats *prev_stats; struct nfp_alink_stats stats; - int err; + int i, err; - if (alink->qdiscs[0].handle != opt->handle) - return -EOPNOTSUPP; - prev_stats = &alink->qdiscs[0].stats; + i = nfp_abm_red_find(alink, opt); + if (i < 0) + return i; + prev_stats = &alink->qdiscs[i].stats; - err = nfp_abm_ctrl_read_stats(alink, &stats); + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_stats(alink, &stats); + else + err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); if (err) return err; @@ -168,13 +238,17 @@ nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) { struct nfp_alink_xstats *prev_xstats; struct nfp_alink_xstats xstats; - int err; + int i, err; - if (alink->qdiscs[0].handle != opt->handle) - return -EOPNOTSUPP; - prev_xstats = &alink->qdiscs[0].xstats; + i = nfp_abm_red_find(alink, opt); + if (i < 0) + return i; + prev_xstats = &alink->qdiscs[i].xstats; - err = nfp_abm_ctrl_read_xstats(alink, &xstats); + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_xstats(alink, &xstats); + else + err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats); if (err) return err; @@ -190,9 +264,6 @@ static int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) { - if (opt->parent != TC_H_ROOT) - return -EOPNOTSUPP; - switch (opt->command) { case TC_RED_REPLACE: return nfp_abm_red_replace(netdev, alink, opt); @@ -208,6 +279,24 @@ nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, } } +static int +nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_mq_qopt_offload *opt) +{ + switch (opt->command) { + case TC_MQ_CREATE: + nfp_abm_reset_root(netdev, alink, opt->handle, + alink->total_queues); + return 0; + case TC_MQ_DESTROY: + if (opt->handle == alink->parent) + nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data) @@ -220,6 +309,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; switch (type) { + case TC_SETUP_QDISC_MQ: + return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data); case TC_SETUP_QDISC_RED: return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data); default: @@ -473,13 +564,21 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) 
alink->abm = abm; alink->vnic = nn; alink->id = id; + alink->parent = TC_H_ROOT; + alink->total_queues = alink->vnic->max_rx_rings; + alink->qdiscs = kvzalloc(sizeof(*alink->qdiscs) * alink->total_queues, + GFP_KERNEL); + if (!alink->qdiscs) { + err = -ENOMEM; + goto err_free_alink; + } /* This is a multi-host app, make sure MAC/PHY is up, but don't * make the MAC/PHY state follow the state of any of the ports. */ err = nfp_eth_set_configured(app->cpp, eth_port->index, true); if (err < 0) - goto err_free_alink; + goto err_free_qdiscs; netif_keep_dst(nn->dp.netdev); @@ -488,6 +587,8 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) return 0; +err_free_qdiscs: + kvfree(alink->qdiscs); err_free_alink: kfree(alink); return err; @@ -498,6 +599,7 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn) struct nfp_abm_link *alink = nn->app_priv; nfp_abm_kill_reprs(alink->abm, alink); + kvfree(alink->qdiscs); kfree(alink); } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 09fd15847961..934a70835473 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -106,6 +106,9 @@ struct nfp_red_qdisc { * @vnic: data vNIC * @id: id of the data vNIC * @queue_base: id of base to host queue within PCIe (not QC idx) + * @total_queues: number of PF queues + * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT + * @num_qdiscs: number of currently used qdiscs * @qdiscs: array of qdiscs */ struct nfp_abm_link { @@ -113,16 +116,25 @@ struct nfp_abm_link { struct nfp_net *vnic; unsigned int id; unsigned int queue_base; - struct nfp_red_qdisc qdiscs[1]; + unsigned int total_queues; + u32 parent; + unsigned int num_qdiscs; + struct nfp_red_qdisc *qdiscs; }; void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm); int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val); +int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, + u32 val); int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, struct nfp_alink_stats *stats); +int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_stats *stats); int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, struct nfp_alink_xstats *xstats); +int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_xstats *xstats); u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i); u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i); int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm); -- cgit v1.2.3 From 2440711e49b6144ef74a51e29e4e876ac5f0d066 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 May 2018 21:53:38 -0700 Subject: nfp: abm: report correct MQ stats Report the stat diff to make sure MQ stats add up to child stats. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/abm/main.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 21d5af1fb061..1561c2724c26 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -279,6 +279,28 @@ nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, } } +static int +nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt) +{ + struct nfp_alink_stats stats; + unsigned int i; + int err; + + for (i = 0; i < alink->num_qdiscs; i++) { + if (alink->qdiscs[i].handle == TC_H_UNSPEC) + continue; + + err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); + if (err) + return err; + + nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats, + &opt->stats); + } + + return 0; +} + static int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt) @@ -292,6 +314,8 @@ nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, if (opt->handle == alink->parent) nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); return 0; + case TC_MQ_STATS: + return nfp_abm_mq_stats(alink, opt); default: return -EOPNOTSUPP; } -- cgit v1.2.3 From c1c9a3c9663b2e15176758626278792862f1ed32 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 26 May 2018 19:15:48 +0800 Subject: net: remove unnecessary genlmsg_cancel() calls the message be freed immediately, no need to trim it back to the previous size. Inspired by commit 7a9b3ec1e19f ("nl80211: remove unnecessary genlmsg_cancel() calls") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/team/team.c | 2 -- drivers/net/wireless/mac80211_hwsim.c | 1 - net/core/devlink.c | 4 ---- net/ipv6/seg6.c | 1 - net/ncsi/ncsi-netlink.c | 1 - net/nfc/netlink.c | 17 ----------------- 6 files changed, 26 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index e6730a01d130..267dcc929f6c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2426,7 +2426,6 @@ send_done: nla_put_failure: err = -EMSGSIZE; errout: - genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } @@ -2720,7 +2719,6 @@ send_done: nla_put_failure: err = -EMSGSIZE; errout: - genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 89fc22520d40..9825bfd42abc 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2514,7 +2514,6 @@ static void hwsim_mcast_new_radio(int id, struct genl_info *info, return; out_err: - genlmsg_cancel(mcast_skb, data); nlmsg_free(mcast_skb); } diff --git a/net/core/devlink.c b/net/core/devlink.c index 475246b355f0..f75ee022e6b2 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1826,7 +1826,6 @@ send_done: nla_put_failure: err = -EMSGSIZE; err_table_put: - genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } @@ -2032,7 +2031,6 @@ int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx) return 0; nla_put_failure: - genlmsg_cancel(dump_ctx->skb, dump_ctx->hdr); nlmsg_free(dump_ctx->skb); return -EMSGSIZE; } @@ -2249,7 +2247,6 @@ send_done: nla_put_failure: err = -EMSGSIZE; err_table_put: - genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } @@ -2551,7 +2548,6 @@ nla_put_failure: err = -EMSGSIZE; err_resource_put: 
err_skb_send_alloc: - genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 7f5621d09571..0fdf2a55e746 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -226,7 +226,6 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info) nla_put_failure: rcu_read_unlock(); - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -ENOMEM; diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index b09ef77bf4cd..99f4c22e2c8f 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c @@ -201,7 +201,6 @@ static int ncsi_pkg_info_nl(struct sk_buff *msg, struct genl_info *info) return genlmsg_reply(skb, info); err: - genlmsg_cancel(skb, hdr); kfree_skb(skb); return rc; } diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index f018eafc2a0d..376181cc1def 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -206,7 +206,6 @@ int nfc_genl_targets_found(struct nfc_dev *dev) return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -237,7 +236,6 @@ int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx) return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -269,7 +267,6 @@ int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol) return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -299,7 +296,6 @@ int nfc_genl_tm_deactivated(struct nfc_dev *dev) return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -340,7 +336,6 @@ int nfc_genl_device_added(struct nfc_dev *dev) return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -370,7 +365,6 @@ int nfc_genl_device_removed(struct nfc_dev *dev) return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -434,8 +428,6 @@ int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list) return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nla_put_failure: - genlmsg_cancel(msg, hdr); - free_msg: nlmsg_free(msg); @@ -470,7 +462,6 @@ int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type) return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -501,7 +492,6 @@ int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx) return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -546,7 +536,6 @@ int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx, return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: /* evt_transaction is no more used */ devm_kfree(&dev->dev, evt_transaction); @@ -585,7 +574,6 @@ int nfc_genl_se_connectivity(struct nfc_dev *dev, u8 se_idx) return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -703,7 +691,6 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -735,7 +722,6 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev) return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -1030,7 +1016,6 @@ static int nfc_genl_send_params(struct sk_buff *msg, return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); return -EMSGSIZE; } @@ -1290,7 +1275,6 @@ int 
nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, return 0; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; @@ -1507,7 +1491,6 @@ static void se_io_cb(void *context, u8 *apdu, size_t apdu_len, int err) return; nla_put_failure: - genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); kfree(ctx); -- cgit v1.2.3 From c3cd281e2bc8da18f225f1191e9751d771411933 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 25 May 2018 23:36:06 +0200 Subject: net/mlx5e: fix TLS dependency With CONFIG_TLS=m and MLX5_CORE_EN=y, we get a link failure: drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.o: In function `mlx5e_tls_handle_ooo': tls_rxtx.c:(.text+0x24c): undefined reference to `tls_get_record' drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.o: In function `mlx5e_tls_handle_tx_skb': tls_rxtx.c:(.text+0x9a8): undefined reference to `tls_device_sk_destruct' This narrows down the dependency to only allow the configurations that will actually work. The existing dependency on TLS_DEVICE is not sufficient here since MLX5_EN_TLS is a 'bool' symbol. Fixes: c83294b9efa5 ("net/mlx5e: TLS, Add Innova TLS TX support") Signed-off-by: Arnd Bergmann Acked-by: Saeed Mahameed Acked-by: Boris Pismenny Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index ee6684779d11..2545296a0c08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -91,6 +91,7 @@ config MLX5_EN_TLS bool "TLS cryptography-offload accelaration" depends on MLX5_CORE_EN depends on TLS_DEVICE + depends on TLS=y || MLX5_CORE=m depends on MLX5_ACCEL default n ---help--- -- cgit v1.2.3 From d377df784178bf5b0a39e75dc8b1ee86e1abb3f6 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Sat, 26 May 2018 20:29:14 -0500 Subject: net: qcom/emac: fix device tree initialization Commit "net: qcom/emac: Encapsulate sgmii ops under one structure" introduced the sgmii_ops structure, but did not correctly initialize it on device tree platforms. This resulted in compiler warnings when ACPI is not enabled. Reported-by: Arnd Bergmann Signed-off-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 562420b834df..e78e5db39458 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -273,6 +273,14 @@ static int emac_sgmii_common_link_change(struct emac_adapter *adpt, bool linkup) return 0; } +static struct sgmii_ops fsm9900_ops = { + .init = emac_sgmii_init_fsm9900, + .open = emac_sgmii_common_open, + .close = emac_sgmii_common_close, + .link_change = emac_sgmii_common_link_change, + .reset = emac_sgmii_common_reset, +}; + static struct sgmii_ops qdf2432_ops = { .init = emac_sgmii_init_qdf2432, .open = emac_sgmii_common_open, @@ -281,6 +289,7 @@ static struct sgmii_ops qdf2432_ops = { .reset = emac_sgmii_common_reset, }; +#ifdef CONFIG_ACPI static struct sgmii_ops qdf2400_ops = { .init = emac_sgmii_init_qdf2400, .open = emac_sgmii_common_open, @@ -288,6 +297,7 @@ static struct sgmii_ops qdf2400_ops = { .link_change = emac_sgmii_common_link_change, .reset = emac_sgmii_common_reset, }; +#endif static int emac_sgmii_acpi_match(struct device *dev, void *data) { @@ -335,11 +345,11 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) static const struct of_device_id emac_sgmii_dt_match[] = { { .compatible = "qcom,fsm9900-emac-sgmii", - .data = emac_sgmii_init_fsm9900, + .data = &fsm9900_ops, }, { .compatible = "qcom,qdf2432-emac-sgmii", - .data = emac_sgmii_init_qdf2432, + .data = &qdf2432_ops, }, {} }; @@ -386,7 +396,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) goto error_put_device; } - phy->sgmii_ops->init = match->data; + phy->sgmii_ops = (struct sgmii_ops *)match->data; } /* Base address is the first address */ -- cgit v1.2.3 From 12b003b2e4e47f9f395ffd29719370023b4f0ba8 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 27 May 2018 09:56:13 +0300 Subject: mlxsw: reg: Add Management Reset and Shutdown Register Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 3f4d7e22cece..1877d9f8a11a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -7034,6 +7034,30 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, mlxsw_reg_mpar_pa_id_set(payload, pa_id); } +/* MRSR - Management Reset and Shutdown Register + * --------------------------------------------- + * MRSR register is used to reset or shutdown the switch or + * the entire system (when applicable). + */ +#define MLXSW_REG_MRSR_ID 0x9023 +#define MLXSW_REG_MRSR_LEN 0x08 + +MLXSW_REG_DEFINE(mrsr, MLXSW_REG_MRSR_ID, MLXSW_REG_MRSR_LEN); + +/* reg_mrsr_command + * Reset/shutdown command + * 0 - do nothing + * 1 - software reset + * Access: WO + */ +MLXSW_ITEM32(reg, mrsr, command, 0x00, 0, 4); + +static inline void mlxsw_reg_mrsr_pack(char *payload) +{ + MLXSW_REG_ZERO(mrsr, payload); + mlxsw_reg_mrsr_command_set(payload, 1); +} + /* MLCR - Management LED Control Register * -------------------------------------- * Controls the system LEDs. 
@@ -7898,6 +7922,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mcia), MLXSW_REG(mpat), MLXSW_REG(mpar), + MLXSW_REG(mrsr), MLXSW_REG(mlcr), MLXSW_REG(mpsc), MLXSW_REG(mcqi), -- cgit v1.2.3 From 2a360bf0f6608f61ef149220cfb9c369f5d086b7 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 27 May 2018 09:56:14 +0300 Subject: mlxsw: cmd: Handle error after reset gracefully There is an exception in command interface processing in case the MRSR register is written to. The register triggers FW reset and during the reset FW returns an error. So handle this by ignoring this error while writing to MRSR register. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 16 +++++++++++----- drivers/net/ethernet/mellanox/mlxsw/core.c | 26 +++++++++++++++++++------- 2 files changed, 30 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 8da91b023b13..2bc48054b685 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -58,7 +58,7 @@ static inline void mlxsw_cmd_mbox_zero(char *mbox) struct mlxsw_core; int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, - u32 in_mod, bool out_mbox_direct, + u32 in_mod, bool out_mbox_direct, bool reset_ok, char *in_mbox, size_t in_mbox_size, char *out_mbox, size_t out_mbox_size); @@ -67,7 +67,7 @@ static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode, size_t in_mbox_size) { return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, - in_mbox, in_mbox_size, NULL, 0); + false, in_mbox, in_mbox_size, NULL, 0); } static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode, @@ -76,7 +76,7 @@ static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode, char *out_mbox, size_t out_mbox_size) { return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, - out_mbox_direct, NULL, 0, + out_mbox_direct, false, NULL, 0, out_mbox, out_mbox_size); } @@ -84,7 +84,7 @@ static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, u32 in_mod) { return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, - NULL, 0, NULL, 0); + false, NULL, 0, NULL, 0); } enum mlxsw_cmd_opcode { @@ -179,6 +179,8 @@ enum mlxsw_cmd_status { MLXSW_CMD_STATUS_BAD_INDEX = 0x0A, /* NVMEM checksum/CRC failed. */ MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B, + /* Device is currently running reset */ + MLXSW_CMD_STATUS_RUNNING_RESET = 0x26, /* Bad management packet (silently discarded). 
*/ MLXSW_CMD_STATUS_BAD_PKT = 0x30, }; @@ -208,6 +210,8 @@ static inline const char *mlxsw_cmd_status_str(u8 status) return "BAD_INDEX"; case MLXSW_CMD_STATUS_BAD_NVMEM: return "BAD_NVMEM"; + case MLXSW_CMD_STATUS_RUNNING_RESET: + return "RUNNING_RESET"; case MLXSW_CMD_STATUS_BAD_PKT: return "BAD_PKT"; default: @@ -869,10 +873,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8); */ static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core, + bool reset_ok, char *in_mbox, char *out_mbox) { return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG, - 0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE, + 0, 0, false, reset_ok, + in_mbox, MLXSW_CMD_MBOX_SIZE, out_mbox, MLXSW_CMD_MBOX_SIZE); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index a38faec45b30..1d9ecf89854e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1480,6 +1480,7 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, { enum mlxsw_emad_op_tlv_status status; int err, n_retry; + bool reset_ok; char *in_mbox, *out_mbox, *tmp; dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n", @@ -1501,9 +1502,16 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); mlxsw_emad_pack_reg_tlv(tmp, reg, payload); + /* There is a special treatment needed for MRSR (reset) register. + * The command interface will return error after the command + * is executed, so tell the lower layer to expect it + * and cope accordingly. + */ + reset_ok = reg->id == MLXSW_REG_MRSR_ID; + n_retry = 0; retry: - err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox); + err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox); if (!err) { err = mlxsw_emad_process_status(out_mbox, &status); if (err) { @@ -1793,7 +1801,7 @@ static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, } int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, - u32 in_mod, bool out_mbox_direct, + u32 in_mod, bool out_mbox_direct, bool reset_ok, char *in_mbox, size_t in_mbox_size, char *out_mbox, size_t out_mbox_size) { @@ -1816,7 +1824,15 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, in_mbox, in_mbox_size, out_mbox, out_mbox_size, &status); - if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { + if (!err && out_mbox) { + dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n"); + mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size); + } + + if (reset_ok && err == -EIO && + status == MLXSW_CMD_STATUS_RUNNING_RESET) { + err = 0; + } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n", opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod, status, mlxsw_cmd_status_str(status)); @@ -1826,10 +1842,6 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, in_mod); } - if (!err && out_mbox) { - dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n"); - mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size); - } return err; } EXPORT_SYMBOL(mlxsw_cmd_exec); -- cgit v1.2.3 From f3a52c6162f835727326a0a497e289494caf1fa5 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 27 May 2018 09:56:15 +0300 Subject: mlxsw: pci: Utilize MRSR register to perform FW reset So far, the PCI BAR0 register is used for triggering FW reset. 
However, that is a legacy attitude and it is recommended to use MRSR to perform reset instead. So do that. Move the reset into init() function as the cmd interface needs to be used. With that, IRQ initialization needs to be moved as well. As a side effect, the reset move simplifies the devlink reload flow. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 4 +- drivers/net/ethernet/mellanox/mlxsw/core.h | 2 +- drivers/net/ethernet/mellanox/mlxsw/pci.c | 130 +++++++++++++---------------- 3 files changed, 62 insertions(+), 74 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 1d9ecf89854e..8a766fe28fa0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -966,14 +966,12 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus; int err; - if (!mlxsw_bus->reset) + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET)) return -EOPNOTSUPP; mlxsw_core_bus_device_unregister(mlxsw_core, true); - mlxsw_bus->reset(mlxsw_core->bus_priv); err = mlxsw_core_bus_device_register(mlxsw_core->bus_info, mlxsw_core->bus, mlxsw_core->bus_priv, true, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 4eac7fbd07d5..4a8d4c7f89d9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -337,6 +337,7 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id) #define MLXSW_BUS_F_TXRX BIT(0) +#define MLXSW_BUS_F_RESET BIT(1) struct mlxsw_bus { const char *kind; @@ -344,7 +345,6 @@ struct mlxsw_bus { const struct mlxsw_config_profile *profile, struct mlxsw_res *res); void (*fini)(void *bus_priv); - void (*reset)(void *bus_priv); bool (*skb_transmit_busy)(void *bus_priv, const struct mlxsw_tx_info *tx_info); int (*skb_transmit)(void *bus_priv, struct sk_buff *skb, diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index db794a1a3a7e..fc4557245ff4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1371,6 +1371,51 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, mbox->mapaddr); } +static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id) +{ + unsigned long end; + char mrsr_pl[MLXSW_REG_MRSR_LEN]; + int err; + + mlxsw_reg_mrsr_pack(mrsr_pl); + err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); + if (err) + return err; + if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { + msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + return 0; + } + + /* We must wait for the HW to become responsive once again. 
*/ + msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); + + end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + do { + u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); + + if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) + break; + cond_resched(); + } while (time_before(jiffies, end)); + return 0; +} + +static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX); + if (err < 0) + dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n"); + return err; +} + +static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci) +{ + pci_free_irq_vectors(mlxsw_pci->pdev); +} + static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, struct mlxsw_res *res) @@ -1398,6 +1443,16 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_out_mbox_alloc; + err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); + if (err) + goto err_sw_reset; + + err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci); + if (err < 0) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_alloc_irq; + } + err = mlxsw_cmd_query_fw(mlxsw_core, mbox); if (err) goto err_query_fw; @@ -1481,6 +1536,9 @@ err_fw_area_init: err_doorbell_page_bar: err_iface_rev: err_query_fw: + mlxsw_pci_free_irq_vectors(mlxsw_pci); +err_alloc_irq: +err_sw_reset: mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); err_out_mbox_alloc: mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); @@ -1496,6 +1554,7 @@ static void mlxsw_pci_fini(void *bus_priv) free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci); mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); + mlxsw_pci_free_irq_vectors(mlxsw_pci); mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); } @@ -1677,58 +1736,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, return err; } -static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, - const struct pci_device_id *id) -{ - unsigned long end; - - mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); - if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { - msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); - return 0; - } - - /* Reset needs to be written before we read control register, and - * we must wait for the HW to become responsive once again - */ - wmb(); - msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); - - end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); - do { - u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); - - if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) - break; - cond_resched(); - } while (time_before(jiffies, end)); - return 0; -} - -static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci) -{ - pci_free_irq_vectors(mlxsw_pci->pdev); -} - -static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) -{ - int err; - - err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX); - if (err < 0) - dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n"); - return err; -} - -static void mlxsw_pci_reset(void *bus_priv) -{ - struct mlxsw_pci *mlxsw_pci = bus_priv; - - mlxsw_pci_free_irq_vectors(mlxsw_pci); - mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); - mlxsw_pci_alloc_irq_vectors(mlxsw_pci); -} - static const struct mlxsw_bus mlxsw_pci_bus = { .kind = "pci", .init = mlxsw_pci_init, @@ -1736,8 +1743,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .skb_transmit_busy = 
mlxsw_pci_skb_transmit_busy, .skb_transmit = mlxsw_pci_skb_transmit, .cmd_exec = mlxsw_pci_cmd_exec, - .features = MLXSW_BUS_F_TXRX, - .reset = mlxsw_pci_reset, + .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -1795,18 +1801,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci); - err = mlxsw_pci_sw_reset(mlxsw_pci, id); - if (err) { - dev_err(&pdev->dev, "Software reset failed\n"); - goto err_sw_reset; - } - - err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci); - if (err < 0) { - dev_err(&pdev->dev, "MSI-X init failed\n"); - goto err_msix_init; - } - mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; @@ -1823,9 +1817,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: - mlxsw_pci_free_irq_vectors(mlxsw_pci); -err_msix_init: -err_sw_reset: iounmap(mlxsw_pci->hw_addr); err_ioremap: err_pci_resource_len_check: @@ -1843,7 +1834,6 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); - mlxsw_pci_free_irq_vectors(mlxsw_pci); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); -- cgit v1.2.3 From d80c9f88317272b54b0e0ec0dd6332d7aef5deea Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Tue, 25 Jul 2017 08:43:09 +0300 Subject: net/mlx5: FPGA, Add doxygen for access type enum Add doxygen comments for enum mlx5_fpga_access_type. Signed-off-by: Ilan Tayari Signed-off-by: Adi Nissim Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h index a0573cc2fc9b..656f96be6e20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h @@ -44,8 +44,14 @@ #define SBU_QP_QUEUE_SIZE 8 #define MLX5_FPGA_CMD_TIMEOUT_MSEC (60 * 1000) +/** + * enum mlx5_fpga_access_type - Enumerated the different methods possible for + * accessing the device memory address space + */ enum mlx5_fpga_access_type { + /** Use the slow CX-FPGA I2C bus */ MLX5_FPGA_ACCESS_TYPE_I2C = 0x0, + /** Use the fastest available method */ MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0, }; -- cgit v1.2.3 From d1a15b1a7e75503b178c75476faa689ed86db223 Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Sun, 2 Jul 2017 10:47:24 +0300 Subject: net/mlx5: FPGA, Add device name Add device name for Mellanox FPGA devices. 
Signed-off-by: Ilan Tayari Signed-off-by: Adi Nissim Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | 7 +++++++ .../net/ethernet/mellanox/mlx5/core/fpga/core.c | 24 +++++++++++++++++++--- 2 files changed, 28 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h index d05233c9b4f6..eb8b0fe0b4e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h @@ -35,6 +35,13 @@ #include +enum mlx5_fpga_device_id { + MLX5_FPGA_DEVICE_UNKNOWN = 0, + MLX5_FPGA_DEVICE_KU040 = 1, + MLX5_FPGA_DEVICE_KU060 = 2, + MLX5_FPGA_DEVICE_KU060_2 = 3, +}; + enum mlx5_fpga_image { MLX5_FPGA_IMAGE_USER = 0, MLX5_FPGA_IMAGE_FACTORY, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index dc8970346521..8531098a7f19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -75,6 +75,21 @@ static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image) } } +static const char *mlx5_fpga_device_name(u32 device) +{ + switch (device) { + case MLX5_FPGA_DEVICE_KU040: + return "ku040"; + case MLX5_FPGA_DEVICE_KU060: + return "ku060"; + case MLX5_FPGA_DEVICE_KU060_2: + return "ku060_2"; + case MLX5_FPGA_DEVICE_UNKNOWN: + default: + return "unknown"; + } +} + static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev) { struct mlx5_fpga_query query; @@ -128,8 +143,9 @@ static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev) int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; - unsigned long flags; unsigned int max_num_qps; + unsigned long flags; + u32 fpga_device_id; int err; if (!fdev) @@ -143,8 +159,10 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) if (err) goto out; - mlx5_fpga_info(fdev, "device %u; %s image, version %u\n", - MLX5_CAP_FPGA(fdev->mdev, fpga_device), + fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device); + mlx5_fpga_info(fdev, "%s:%u; %s image, version %u\n", + mlx5_fpga_device_name(fpga_device_id), + fpga_device_id, mlx5_fpga_image_name(fdev->last_oper_image), MLX5_CAP_FPGA(fdev->mdev, image_version)); -- cgit v1.2.3 From 98c90f6f103aa72d2ad587b76105834b2b78d031 Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Tue, 4 Jul 2017 12:53:29 +0300 Subject: net/mlx5: FPGA, print SBU identification on init Add print of the following values on init: 1. ieee vendor id 2. sandbox product id 3. 
sandbox product version Signed-off-by: Ilan Tayari Signed-off-by: Adi Nissim Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 8531098a7f19..02319f779a49 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -160,11 +160,14 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) goto out; fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device); - mlx5_fpga_info(fdev, "%s:%u; %s image, version %u\n", + mlx5_fpga_info(fdev, "%s:%u; %s image, version %u; SBU %06x:%04x version %d\n", mlx5_fpga_device_name(fpga_device_id), fpga_device_id, mlx5_fpga_image_name(fdev->last_oper_image), - MLX5_CAP_FPGA(fdev->mdev, image_version)); + MLX5_CAP_FPGA(fdev->mdev, image_version), + MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id), + MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id), + MLX5_CAP_FPGA(fdev->mdev, sandbox_product_version)); max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps); err = mlx5_core_reserve_gids(mdev, max_num_qps); -- cgit v1.2.3 From 90275d809ab982a7fdc1f2ed5b0fe3e4c55a5257 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Wed, 8 Nov 2017 18:07:17 +0200 Subject: net/mlx5: FPGA, Abort FPGA init if the device reports no QP capability In the case that the reported max number of QPs capability equals to zero, abort FPGA init. Signed-off-by: Yevgeny Kliteynik Signed-off-by: Adi Nissim Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 02319f779a49..26caa0475985 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -170,6 +170,12 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) MLX5_CAP_FPGA(fdev->mdev, sandbox_product_version)); max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps); + if (!max_num_qps) { + mlx5_fpga_err(fdev, "FPGA reports 0 QPs in SHELL_CAPS\n"); + err = -ENOTSUPP; + goto out; + } + err = mlx5_core_reserve_gids(mdev, max_num_qps); if (err) goto out; -- cgit v1.2.3 From ba869ee0e0f53077f58ce43a9381b4ee0a20cdd6 Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Tue, 14 Nov 2017 10:30:55 +0200 Subject: net/mlx5: FPGA, Properly initialize dma direction on fpga conn send Properly initialize dma direction on fpga conn send. Do not rely on dma_dir == 0 (DMA_BIDIRECTIONAL). 
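A minimal sketch of the pattern this change applies, using generic names rather than the mlx5 FPGA conn structures (illustrative only): set the DMA direction explicitly before mapping a TX buffer instead of relying on a zero-initialized field, which silently means DMA_BIDIRECTIONAL because that enum value happens to be 0.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct example_tx_buf {			/* hypothetical, not the mlx5 struct */
	void *data;
	size_t len;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
};

static int example_map_tx_buf(struct device *dev, struct example_tx_buf *buf)
{
	/* Host-to-device transfer: state the direction, do not inherit 0. */
	buf->dma_dir = DMA_TO_DEVICE;

	buf->dma_addr = dma_map_single(dev, buf->data, buf->len, buf->dma_dir);
	if (dma_mapping_error(dev, buf->dma_addr))
		return -ENOMEM;
	return 0;
}

static void example_unmap_tx_buf(struct device *dev, struct example_tx_buf *buf)
{
	/* Unmap with the same direction and size that were used for mapping. */
	dma_unmap_single(dev, buf->dma_addr, buf->len, buf->dma_dir);
}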
Signed-off-by: Ilya Lesokhin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 4e5a5cf25f17..bf84678b21d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -181,6 +181,7 @@ int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn, if (!conn->qp.active) return -ENOTCONN; + buf->dma_dir = DMA_TO_DEVICE; err = mlx5_fpga_conn_map_buf(conn, buf); if (err) return err; -- cgit v1.2.3 From 36dd4902004c39b7e8a16a7cf1535faa7ecfa921 Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Mon, 28 Aug 2017 09:47:32 +0300 Subject: net/mlx5: FPGA, Call DMA unmap with the right size When mlx5_fpga_conn_unmap_buf is called buf->sg[0].size should equal the actual buffer size, not the message size. Otherwise we will trigger the following dma debug warning "DMA-API: device driver frees DMA memory with different size" Fixes: 537a50574175 ('net/mlx5: FPGA, Add high-speed connection routines') Signed-off-by: Ilya Lesokhin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index bf84678b21d6..4138a770ed57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -256,8 +256,6 @@ static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1); buf = conn->qp.rq.bufs[ix]; conn->qp.rq.bufs[ix] = NULL; - if (!status) - buf->sg[0].size = be32_to_cpu(cqe->byte_cnt); conn->qp.rq.cc++; if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR))) @@ -275,6 +273,7 @@ static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, return; } + buf->sg[0].size = be32_to_cpu(cqe->byte_cnt); mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n", buf->sg[0].size); conn->recv_cb(conn->cb_arg, buf); -- cgit v1.2.3 From 01252a27837a9dd099a6e8cfa3adc4772033a5bf Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 22 May 2018 20:18:36 +0300 Subject: net/mlx5e: Get the number of offloaded TC rules from the correct table As we keep the offloaded TC rules for NIC and e-switch in two different places, make sure to return the number of offloaded flows according to the use-case and not blindly from the priv. 
Fixes: 655dc3d2b91b ('net/mlx5e: Use shared table for offloaded TC eswitch flows') Signed-off-by: Or Gerlitz Reported-by: Paul Blakey Reviewed-by: Paul Blakey Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 7 +++++++ drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 5 +---- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9372d914abe5..0edf4751a8ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2876,3 +2876,10 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) { rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); } + +int mlx5e_tc_num_filters(struct mlx5e_priv *priv) +{ + struct rhashtable *tc_ht = get_tc_ht(priv); + + return atomic_read(&tc_ht->nelems); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 59e52b845beb..49436bf3b80a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -68,10 +68,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, struct mlx5e_neigh_hash_entry; void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); -static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) -{ - return atomic_read(&priv->fs.tc.ht.nelems); -} +int mlx5e_tc_num_filters(struct mlx5e_priv *priv); #else /* CONFIG_MLX5_ESWITCH */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } -- cgit v1.2.3 From fabdcc2ecd58500689f4fb73dc77283623d9ab7e Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 26 Mar 2018 13:07:03 +0300 Subject: iwlwifi: mvm: drop UNKNOWN security type frames In some cases we may get from FW errored frames with UNKNOWN security type. This may happen in unsecured aggregation flow, where the first packet had a CRC error in the WEP bit, which was followed by a failure to decrypt and was dropped. The next frames in the aggregation "inherit" the bad metadata of the first packet. Make sure to drop such frames since RADA and other offloads will not operate correctly which may have unexpected results. In case of AP it also causes to TX AMSDU frames to the peers, resulting with assert 0x104B. 
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/rx.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index e7565f37ece9..7e570c4a9df0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -295,6 +295,7 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, + IWL_RX_MPDU_STATUS_SEC_UNKNOWN = IWL_RX_MPDU_STATUS_SEC_MASK, IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index bb63e75a9b7f..2b1f0dc73c25 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -227,12 +227,24 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, } static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, - struct ieee80211_rx_status *stats, - struct iwl_rx_mpdu_desc *desc, u32 pkt_flags, - int queue, u8 *crypt_len) + struct ieee80211_rx_status *stats, u16 phy_info, + struct iwl_rx_mpdu_desc *desc, + u32 pkt_flags, int queue, u8 *crypt_len) { u16 status = le16_to_cpu(desc->status); + /* + * Drop UNKNOWN frames in aggregation, unless in monitor mode + * (where we don't have the keys). + * We limit this to aggregation because in TKIP this is a valid + * scenario, since we may not have the (correct) TTAK (phase 1 + * key) in the firmware. + */ + if (phy_info & IWL_RX_MPDU_PHY_AMPDU && + (status & IWL_RX_MPDU_STATUS_SEC_MASK) == + IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) + return -1; + if (!ieee80211_has_protected(hdr->frame_control) || (status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE) @@ -870,7 +882,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); - if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, + if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc, le32_to_cpu(pkt->len_n_flags), queue, &crypt_len)) { kfree_skb(skb); -- cgit v1.2.3 From 15c4e33030d172ac0d2a03ebbae4c3687a80ac7d Mon Sep 17 00:00:00 2001 From: Aviya Erenfeld Date: Mon, 26 Mar 2018 14:35:32 +0300 Subject: iwlmvm: tdls: Check TDLS channel switch support Some versions of the FW don't support channel switch in TDLS. Add a condition that checks it. 
Fixes: 307e47235a10 ("iwlwifi: mvm: configure TDLS peers to FW") Signed-off-by: Aviya Erenfeld Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tdls.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 3d97436bbdf5..67f360c0d17e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -7,6 +7,7 @@ * * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,9 +19,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program. * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -33,6 +32,7 @@ * * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -188,8 +188,14 @@ void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (tdls_sta_cnt == 1 && sta_added) iwl_mvm_power_update_mac(mvm); - /* configure the FW with TDLS peer info */ - iwl_mvm_tdls_config(mvm, vif); + /* Configure the FW with TDLS peer info only if TDLS channel switch + * capability is set. + * TDLS config data is used currently only in TDLS channel switch code. + * Supposed to serve also TDLS buffer station which is not implemneted + * yet in FW*/ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) + iwl_mvm_tdls_config(mvm, vif); /* when the last peer leaves, send a power update last */ if (tdls_sta_cnt == 0 && !sta_added) -- cgit v1.2.3 From 0eac9abace1629c20420d9122d6edc80d292b0ec Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 8 Apr 2018 12:33:31 +0300 Subject: iwlwifi: mvm: fix TSO with highly fragmented SKBs Our hardware has a limited amount of buffer descriptors for each Tx packet. Because of that, there is a short piece of code that makes sure that that we don't push too many subframes in an A-MSDU because of subframes needs 2 buffer descriptors. This code also takes into account the number of fragment of the skb since we also need a buffer descriptor for each fragment in the skb. This piece of code though didn't check that the resulting number of subframes wasn't 0. A user reported that using NFS client, he could get skbs that are so fragmented that the code mentioned above returned 0 for the number of subframes making skb_gso_segment fail and subconsequently iwlwifi would WARN. Fix this by make sure that num_subframes is at least 1. 
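As a rough illustration of the descriptor budget described above (standalone sketch, not driver code; the max_skb_frags value of 25 is an assumed example, not a hardware constant): each A-MSDU subframe costs about two buffer descriptors, each skb fragment one more, plus one for data carried in the header, and the fix falls back to a single subframe instead of zero when the budget is exceeded.

#include <stdio.h>

static unsigned int clamp_subframes(unsigned int num_subframes,
				    unsigned int nr_frags,
				    unsigned int max_skb_frags)
{
	/* 2 descriptors per subframe + 1 per skb fragment + 1 for header data */
	if (num_subframes * 2 + nr_frags + 1 > max_skb_frags)
		num_subframes = 1;	/* never 0, so skb_gso_segment() is never asked for zero segments */
	return num_subframes;
}

int main(void)
{
	/* Heavily fragmented skb (e.g. NFS): 17 fragments, 5 subframes wanted. */
	printf("%u\n", clamp_subframes(5, 17, 25));	/* prints 1 */
	/* Lightly fragmented skb keeps its A-MSDU. */
	printf("%u\n", clamp_subframes(5, 3, 25));	/* prints 5 */
	return 0;
}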
This fixes: https://bugzilla.kernel.org/show_bug.cgi?id=199209 Fixes: a6d5e32f247c ("iwlwifi: mvm: send large SKBs to the transport") Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index df4c60496f72..0b7f98d00298 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -843,8 +843,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * N * subf_len + (N - 1) * pad. */ num_subframes = (max_amsdu_len + pad) / (subf_len + pad); - if (num_subframes > 1) - *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; @@ -855,10 +853,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * 1 more for each fragment * 1 more for the potential data in the header */ - num_subframes = - min_t(unsigned int, num_subframes, - (mvm->trans->max_skb_frags - 1 - - skb_shinfo(skb)->nr_frags) / 2); + if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > + mvm->trans->max_skb_frags) + num_subframes = 1; + + if (num_subframes > 1) + *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; /* This skb fits in one single A-MSDU */ if (num_subframes * mss >= tcp_payload_len) { -- cgit v1.2.3 From 0f22e40053bd5378ad1e3250e65c574fd61c0cd6 Mon Sep 17 00:00:00 2001 From: Shaul Triebitz Date: Thu, 22 Mar 2018 14:14:45 +0200 Subject: iwlwifi: pcie: fix race in Rx buffer allocator Make sure the rx_allocator worker is canceled before running the rx_init routine. rx_init frees and re-allocates all rxb's pages. The rx_allocator worker also allocates pages for the used rxb's. Running rx_init and rx_allocator simultaniously causes a kernel panic. Fix that by canceling the work in rx_init. Signed-off-by: Shaul Triebitz Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index f772d70a65e4..d15f5ba2dc77 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -902,6 +902,8 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) } def_rxq = trans_pcie->rxq; + cancel_work_sync(&rba->rx_alloc); + spin_lock(&rba->lock); atomic_set(&rba->req_pending, 0); atomic_set(&rba->req_ready, 0); -- cgit v1.2.3 From b38c395f16562423f438065ad8d88c20ad92cab3 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 11 Apr 2018 14:05:33 +0100 Subject: iwlwifi: mvm: remove division by size of sizeof(struct ieee80211_wmm_rule) The subtraction of two struct ieee80211_wmm_rule pointers leaves a result that is automatically scaled down by the size of the size of pointed-to type, hence the division by sizeof(struct ieee80211_wmm_rule) is bogus and should be removed. 
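A standalone illustration of the C rule this fix relies on (example types only, not the wireless structures): subtracting two pointers of the same type already yields a count of elements, i.e. the byte difference divided by the size of the pointed-to type, so dividing again by sizeof() scales the index down a second time.

#include <stdio.h>
#include <stddef.h>

struct rule { int data[8]; };	/* stand-in for struct ieee80211_wmm_rule */

int main(void)
{
	struct rule rules[16];
	struct rule *base = rules;
	struct rule *elem = &rules[5];

	ptrdiff_t right = elem - base;				/* 5: element offset */
	ptrdiff_t wrong = (elem - base) / sizeof(*base);	/* 0: divided twice */

	printf("right=%td wrong=%td\n", right, wrong);
	return 0;
}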
Detected by CoverityScan, CID#1467777 ("Extra sizeof expression") Fixes: 77e30e10ee28 ("iwlwifi: mvm: query regdb for wmm rule if needed") Signed-off-by: Colin Ian King Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 0f9d56420c42..b815ba38dbdb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -1025,8 +1025,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, continue; copy_rd->reg_rules[i].wmm_rule = d_wmm + - (regd->reg_rules[i].wmm_rule - s_wmm) / - sizeof(struct ieee80211_wmm_rule); + (regd->reg_rules[i].wmm_rule - s_wmm); } out: -- cgit v1.2.3 From d94c5a820d107fdde711ec72c16848876027713d Mon Sep 17 00:00:00 2001 From: Gregory Greenman Date: Tue, 24 Apr 2018 06:26:41 +0300 Subject: iwlwifi: mvm: open BA session only when sta is authorized Currently, a BA session is opened when the tx traffic exceeds 10 frames per second. As a result of inter-op problems with some APs, add a condition to open BA session only when station is already authorized. Fixes: 482e48440a0e ("iwlwifi: mvm: change open and close criteria of a BA session") Signed-off-by: Gregory Greenman Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 8 ++--- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 38 +++++++++++------------ drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 7 ++--- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 10 +++--- 5 files changed, 29 insertions(+), 36 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5f0701c992a4..f3cab26f81b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2685,7 +2685,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); /* track whether or not the station is associated */ - mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC; + mvm_sta->sta_state = new_state; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { @@ -2737,8 +2737,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - true); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { @@ -2754,8 +2753,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - false); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index af7bc3b7b0c3..642da10b0b7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -3,6 +3,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -13,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -651,9 +648,10 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, } tid_data = &mvmsta->tid_data[tid]; - if ((tid_data->state == IWL_AGG_OFF) && + if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED && + tid_data->state == IWL_AGG_OFF && (lq_sta->tx_agg_tid_en & BIT(tid)) && - (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) { + tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD) { IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0) tid_data->state = IWL_AGG_QUEUED; @@ -1257,7 +1255,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, (unsigned long)(lq_sta->last_tx + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); - iwl_mvm_rs_rate_init(mvm, sta, info->band, false); + iwl_mvm_rs_rate_init(mvm, sta, info->band); return; } lq_sta->last_tx = jiffies; @@ -2690,9 +2688,9 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, enum nl80211_band band, - struct rs_rate *rate, - bool init) + struct rs_rate *rate) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int i, nentries; unsigned long active_rate; s8 best_rssi = S8_MIN; @@ -2754,7 +2752,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, * bandwidth rate, and after authorization, when the phy context * is already up-to-date, re-init rs with the correct bw. */ - u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); + u32 bw = mvmsta->sta_state < IEEE80211_STA_AUTHORIZED ? 
+ RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); switch (bw) { case RATE_MCS_CHAN_WIDTH_40: @@ -2839,9 +2838,9 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, static void rs_initialize_lq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - enum nl80211_band band, - bool init) + enum nl80211_band band) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_scale_tbl_info *tbl; struct rs_rate *rate; u8 active_tbl = 0; @@ -2857,7 +2856,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, tbl = &(lq_sta->lq_info[active_tbl]); rate = &tbl->rate; - rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init); + rs_get_initial_rate(mvm, sta, lq_sta, band, rate); rs_init_optimal_rate(mvm, sta, lq_sta); WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, @@ -2870,7 +2869,8 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, rs_set_expected_tpt_table(lq_sta, tbl); rs_fill_lq_cmd(mvm, sta, lq_sta, rate); /* TODO restore station should remember the lq cmd */ - iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init); + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, + mvmsta->sta_state < IEEE80211_STA_AUTHORIZED); } static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, @@ -3123,7 +3123,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) * Called after adding a new station to initialize rate scaling */ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool init) + enum nl80211_band band) { int i, j; struct ieee80211_hw *hw = mvm->hw; @@ -3203,7 +3203,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif - rs_initialize_lq(mvm, sta, lq_sta, band, init); + rs_initialize_lq(mvm, sta, lq_sta, band); } static void rs_drv_rate_update(void *mvm_r, @@ -3223,7 +3223,7 @@ static void rs_drv_rate_update(void *mvm_r, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) ieee80211_stop_tx_ba_session(sta, tid); - iwl_mvm_rs_rate_init(mvm, sta, sband->band, false); + iwl_mvm_rs_rate_init(mvm, sta, sband->band); } #ifdef CONFIG_MAC80211_DEBUGFS @@ -4069,12 +4069,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = { }; void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool init) + enum nl80211_band band) { if (iwl_mvm_has_tlc_offload(mvm)) rs_fw_rate_init(mvm, sta, band); else - rs_drv_rate_init(mvm, sta, band, init); + rs_drv_rate_init(mvm, sta, band); } int iwl_mvm_rate_control_register(void) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 5e89141656c0..cffb8c852934 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -3,6 +3,7 @@ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -13,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -410,7 +407,7 @@ struct iwl_lq_sta { /* Initialize station's rate scaling information after adding station */ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool init); + enum nl80211_band band); /* Notify RS about Tx status */ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 4ddd2c427b83..9263b9aa8b72 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -216,7 +216,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); - if (mvm_sta->associated) + if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); if (sta->wme) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 60502c81dfce..1c43ea8dd8cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -376,6 +373,7 @@ struct iwl_mvm_rxq_dup_data { * tid. 
* @max_agg_bufsize: the maximal size of the AGG buffer for this station * @sta_type: station type + * @sta_state: station state according to enum %ieee80211_sta_state * @bt_reduced_txpower: is reduced tx power enabled for this station * @next_status_eosp: the next reclaimed packet is a PS-Poll response and * we need to signal the EOSP @@ -416,6 +414,7 @@ struct iwl_mvm_sta { u16 tid_disable_agg; u8 max_agg_bufsize; enum iwl_sta_type sta_type; + enum ieee80211_sta_state sta_state; bool bt_reduced_txpower; bool next_status_eosp; spinlock_t lock; @@ -441,7 +440,6 @@ struct iwl_mvm_sta { u16 amsdu_enabled; u16 max_amsdu_len; bool sleeping; - bool associated; u8 agg_tids; u8 sleep_tx_count; u8 avg_energy; -- cgit v1.2.3 From 5dd9f6c703e8161bf69fb9d004528bfb639548ce Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 17 May 2018 10:44:20 +0300 Subject: iwlwifi: mvm: honor the max_amsdu_subframes limit A peer can limit the number of subframes it can handle in a single A-MSDU. Honor this limit. Note that the smallest limit is 8, and we are very unlikely to reach that limit. So this isn't really a big deal. Fixes: a6d5e32f247c ("iwlwifi: mvm: send large SKBs to the transport") Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 0b7f98d00298..cf2591f2ac23 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -844,6 +844,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, */ num_subframes = (max_amsdu_len + pad) / (subf_len + pad); + if (sta->max_amsdu_subframes && + num_subframes > sta->max_amsdu_subframes) + num_subframes = sta->max_amsdu_subframes; + tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; -- cgit v1.2.3 From 42116705a7b17ddc321b6e9999b8df2dc967c357 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 22 May 2018 15:08:32 +0200 Subject: iwlwifi: mvm: fix race in queue notification wait Initially in this code, the race didn't matter since it didn't do anything. Latest with the commit I marked this as fixing it started to matter as something got done here that needed other data that got freed as soon as the queue notification wait was returning. In the scenario we saw, apparently the IWL_MVM_RXQ_NOTIF_DEL_BA event was sent to all queues, but processing the last event we returned from iwl_mvm_sync_rx_queues_internal() and then from iwl_mvm_free_reorder() and continued some processing before wl_mvm_del_ba() was even invoked on the other CPU. Thus, when the latter finally ran, it found that mvm->baid_map[baid] was no longer valid. Correct the race by moving the counter decrement and wake_up() to be done only after all the per-event processing completed. Note that in the commit I marked as being fixed the wake_up() didn't exist yet (and the code was otherwise problematic) but this particular problem already existed in a way. 
Fixes: b915c10174fb ("iwlwifi: mvm: add reorder buffer per queue") Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 2b1f0dc73c25..129c4c09648d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -590,14 +590,10 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, notif = (void *)pkt->data; internal_notif = (void *)notif->payload; - if (internal_notif->sync) { - if (mvm->queue_sync_cookie != internal_notif->cookie) { - WARN_ONCE(1, - "Received expired RX queue sync message\n"); - return; - } - if (!atomic_dec_return(&mvm->queue_sync_counter)) - wake_up(&mvm->rx_sync_waitq); + if (internal_notif->sync && + mvm->queue_sync_cookie != internal_notif->cookie) { + WARN_ONCE(1, "Received expired RX queue sync message\n"); + return; } switch (internal_notif->type) { @@ -609,6 +605,10 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, default: WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); } + + if (internal_notif->sync && + !atomic_dec_return(&mvm->queue_sync_counter)) + wake_up(&mvm->rx_sync_waitq); } /* -- cgit v1.2.3 From 506247825c5e877741f33a2c657d351e5c7943ba Mon Sep 17 00:00:00 2001 From: Erel Geron Date: Mon, 28 May 2018 17:09:30 +0300 Subject: iwlwifi: fix non_shared_ant for 9000 devices The non-shared antenna was wrong for 9000 device series. Fix it to ANT_B for correct antenna preference by coex in MVM driver. Fixes: 89374fe60bfb ("iwlwifi: Add new PCI IDs for 9260 and 5165 series") Signed-off-by: Erel Geron Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 9706624911f8..e20c30b29c03 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -137,7 +137,7 @@ static const struct iwl_tt_params iwl9000_tt_params = { .base_params = &iwl9000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_9000, \ - .non_shared_ant = ANT_A, \ + .non_shared_ant = ANT_B, \ .dccm_offset = IWL9000_DCCM_OFFSET, \ .dccm_len = IWL9000_DCCM_LEN, \ .dccm2_offset = IWL9000_DCCM2_OFFSET, \ -- cgit v1.2.3 From 3ded9f2b35f02ca00523c79f88f6cf5050622384 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 28 May 2018 17:49:46 +0200 Subject: net: ethernet: freescale: fix false-positive string overflow warning While compile-testing on arm64 with gcc-8.1, I ran into a build diagnostic: drivers/net/ethernet/freescale/fec_main.c: In function 'fec_probe': drivers/net/ethernet/freescale/fec_main.c:3517:25: error: '%d' directive writing between 1 and 10 bytes into a region of size 5 [-Werror=format-overflow=] sprintf(irq_name, "int%d", i); ^~ drivers/net/ethernet/freescale/fec_main.c:3517:21: note: directive argument in the range [0, 2147483646] sprintf(irq_name, "int%d", i); ^~~~~~~ drivers/net/ethernet/freescale/fec_main.c:3517:3: note: 'sprintf' output between 5 and 14 bytes into a destination of size 8 sprintf(irq_name, "int%d", i); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It appears this has never shown on ppc32 or arm32 
for an unknown reason, but now gcc fails to identify that the 'irq_cnt' loop index has an upper bound of 3, and instead uses a bogus range. To work around the warning, this changes the sprintf to snprintf with the correct buffer length. Fixes: 78cc6e7ef957 ("net: ethernet: freescale: Allow FEC with COMPILE_TEST") Signed-off-by: Arnd Bergmann Acked-by: Fugang Duan Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ab7521c04eb2..c729665107f5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3514,7 +3514,7 @@ fec_probe(struct platform_device *pdev) goto failed_init; for (i = 0; i < irq_cnt; i++) { - sprintf(irq_name, "int%d", i); + snprintf(irq_name, sizeof(irq_name), "int%d", i); irq = platform_get_irq_byname(pdev, irq_name); if (irq < 0) irq = platform_get_irq(pdev, i); -- cgit v1.2.3 From 37c9102f216c2ecd4296de1027fbfc4fa7d73877 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 28 May 2018 17:50:20 +0200 Subject: net: davinci: fix building davinci mdio code without CONFIG_OF Test-building this driver on targets without CONFIG_OF revealed a build failure: drivers/net/ethernet/ti/davinci_mdio.c: In function 'davinci_mdio_probe': drivers/net/ethernet/ti/davinci_mdio.c:380:9: error: implicit declaration of function 'davinci_mdio_probe_dt'; did you mean 'davinci_mdio_probe'? [-Werror=implicit-function-declaration] This adjusts the #ifdef logic in the driver to make it build in all configurations. Fixes: 2652113ff043 ("net: ethernet: ti: Allow most drivers with COMPILE_TEST") Signed-off-by: Arnd Bergmann Acked-by: Sekhar Nori Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_mdio.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 8ac72831af05..a98aedae1b41 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -321,7 +321,6 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, return ret; } -#if IS_ENABLED(CONFIG_OF) static int davinci_mdio_probe_dt(struct mdio_platform_data *data, struct platform_device *pdev) { @@ -339,7 +338,6 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data, return 0; } -#endif #if IS_ENABLED(CONFIG_OF) static const struct davinci_mdio_of_param of_cpsw_mdio_data = { @@ -374,7 +372,7 @@ static int davinci_mdio_probe(struct platform_device *pdev) return -ENOMEM; } - if (dev->of_node) { + if (IS_ENABLED(CONFIG_OF) && dev->of_node) { const struct of_device_id *of_id; ret = davinci_mdio_probe_dt(&data->pdata, pdev); -- cgit v1.2.3 From d602de8e7e7fc25fb3a2112ce4285962f15aa549 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 28 May 2018 19:51:57 -0700 Subject: drivers/net: Fix various unnecessary characters after logging newlines Remove and coalesce formats when there is an unnecessary character after a logging newline. These extra characters cause logging defects. Miscellanea: o Coalesce formats Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 6 ++---- drivers/net/ethernet/qlogic/qed/qed_dev.c | 2 +- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4 ++-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 2 +- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 3 +-- drivers/net/wireless/intersil/prism54/islpci_eth.c | 6 +++--- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 4 ++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 4 ++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 2 +- 11 files changed, 18 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e500528ad751..8a815bb57177 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1766,7 +1766,7 @@ static int load_firmware(struct octeon_device *oct) ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); if (ret) { - dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.", + dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n", fw_name); release_firmware(fw); return ret; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index 6cec2a6a3dcc..7503aa222392 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -146,8 +146,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter) if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) { memcpy(adapter->mdump.md_template, addr, size); } else { - dev_err(&adapter->pdev->dev, "Failed to get minidump template, " - "err_code : %d, requested_size : %d, actual_size : %d\n ", + dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n", cmd.rsp.cmd, size, cmd.rsp.arg2); } pci_free_consistent(adapter->pdev, size, addr, md_template_addr); @@ -180,8 +179,7 @@ netxen_setup_minidump(struct netxen_adapter *adapter) if ((err == NX_RCODE_CMD_INVALID) || (err == NX_RCODE_CMD_NOT_IMPL)) { dev_info(&adapter->pdev->dev, - "Flashed firmware version does not support minidump, " - "minimum version required is [ %u.%u.%u ].\n ", + "Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n", NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR, NX_MD_SUPPORT_SUBVERSION); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 560528962658..fde20fd9942c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1098,7 +1098,7 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, } DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Sending final cleanup for PFVF[%d] [Command %08x\n]", + "Sending final cleanup for PFVF[%d] [Command %08x]\n", id, command); qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8293c2028002..70de062b72a1 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2211,7 +2211,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, 
rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", + "cq_id = %d, prod = %d, cnsmr = %d\n", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; @@ -2258,7 +2258,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", + "cq_id = %d, prod = %d, cnsmr = %d\n", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = rx_ring->curr_entry; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index f5b405c98047..b6122aad639e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1264,7 +1264,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) brcmf_dbg(TRACE, "Enter\n"); if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) { - brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n "); + brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n"); err = brcmf_fil_cmd_data_set(vif->ifp, BRCMF_C_DISASSOC, NULL, 0); if (err) { diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index ba3fb1d2ddb4..f26beeb6c5ff 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -7557,8 +7557,7 @@ static int ipw_associate(void *data) } if (priv->status & STATUS_DISASSOCIATING) { - IPW_DEBUG_ASSOC("Not attempting association (in " - "disassociating)\n "); + IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n"); schedule_work(&priv->associate); return 0; } diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c index 9b0ded733294..b277113b33d3 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_eth.c +++ b/drivers/net/wireless/intersil/prism54/islpci_eth.c @@ -57,7 +57,7 @@ islpci_eth_cleanup_transmit(islpci_private *priv, #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "cleanup skb %p skb->data %p skb->len %u truesize %u\n ", + "cleanup skb %p skb->data %p skb->len %u truesize %u\n", skb, skb->data, skb->len, skb->truesize); #endif @@ -328,7 +328,7 @@ islpci_eth_receive(islpci_private *priv) #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ", + "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n", control_block->rx_data_low[priv->free_data_rx].address, skb->data, skb->len, offset, skb->truesize); #endif @@ -436,7 +436,7 @@ islpci_eth_receive(islpci_private *priv) #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ", + "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n", skb, skb->data, skb->len, index, skb->truesize); #endif diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index 38b2ba1ac6f8..380e86f9e00b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1267,8 +1267,8 @@ static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, - "%s: e94 =%x e9c=%x 
ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " - "ecc=%x\n ", __func__, reg_e94, reg_e9c, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", + __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index c4b86a84a721..26b674aca125 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1175,8 +1175,8 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, - "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " - "ecc=%x\n ", __func__, reg_e94, reg_e9c, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", + __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 718a73c623a7..505ab1b055ff 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -3406,8 +3406,8 @@ void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, - "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " - "ecc=%x\n ", __func__, reg_e94, reg_e9c, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", + __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index 9111ba7ff0a1..3be8c88971e2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -2642,7 +2642,7 @@ static void rtl8821ae_dm_edca_choose_traffic_idx( if (cur_tx_bytes > (cur_rx_bytes*4)) { *pb_is_cur_rdl_state = false; RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, - "Uplink Traffic\n "); + "Uplink Traffic\n"); } else { *pb_is_cur_rdl_state = true; RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, -- cgit v1.2.3 From 6d89265d78417174f344c4ec4c13347f178ea0da Mon Sep 17 00:00:00 2001 From: Andrey Shevchenko Date: Tue, 29 May 2018 14:59:57 +0300 Subject: qtnfmac: remove unused function declarations Functions qtnf_cmd_resp_parse and qtnf_cmd_resp_check have been removed. Remove their declarations as well. 
Signed-off-by: Andrey Shevchenko Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/commands.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 69a7d56f7e58..cf9274add26d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -58,11 +58,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, struct station_parameters *params); int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, struct station_del_parameters *params); - -int qtnf_cmd_resp_parse(struct qtnf_bus *bus, struct sk_buff *resp_skb); -int qtnf_cmd_resp_check(const struct qtnf_vif *vif, - const struct sk_buff *resp_skb, u16 cmd_id, - u16 *result, const u8 **payload, size_t *payload_size); int qtnf_cmd_send_scan(struct qtnf_wmac *mac); int qtnf_cmd_send_connect(struct qtnf_vif *vif, struct cfg80211_connect_params *sme); -- cgit v1.2.3 From d62b622ca4099517878db5b7bfb06bc449513365 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 29 May 2018 14:59:58 +0300 Subject: qtnfmac: simplify notation Shorten line lengths using a more compact notation to access mac info. Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 25 ++++++++++++----------- 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 5122dc798064..bf624d975953 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -955,6 +955,7 @@ qtnf_wiphy_setup_if_comb(struct wiphy *wiphy, struct qtnf_mac_info *mac_info) int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) { struct wiphy *wiphy = priv_to_wiphy(mac); + struct qtnf_mac_info *macinfo = &mac->macinfo; int ret; if (!wiphy) { @@ -962,20 +963,20 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) return -EFAULT; } - wiphy->frag_threshold = mac->macinfo.frag_thr; - wiphy->rts_threshold = mac->macinfo.rts_thr; - wiphy->retry_short = mac->macinfo.sretry_limit; - wiphy->retry_long = mac->macinfo.lretry_limit; - wiphy->coverage_class = mac->macinfo.coverage_class; + wiphy->frag_threshold = macinfo->frag_thr; + wiphy->rts_threshold = macinfo->rts_thr; + wiphy->retry_short = macinfo->sretry_limit; + wiphy->retry_long = macinfo->lretry_limit; + wiphy->coverage_class = macinfo->coverage_class; wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH; wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN; wiphy->mgmt_stypes = qtnf_mgmt_stypes; wiphy->max_remain_on_channel_duration = 5000; - wiphy->max_acl_mac_addrs = mac->macinfo.max_acl_mac_addrs; + wiphy->max_acl_mac_addrs = macinfo->max_acl_mac_addrs; wiphy->max_num_csa_counters = 2; - ret = qtnf_wiphy_setup_if_comb(wiphy, &mac->macinfo); + ret = qtnf_wiphy_setup_if_comb(wiphy, macinfo); if (ret) goto out; @@ -994,12 +995,12 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; - wiphy->available_antennas_tx = mac->macinfo.num_tx_chain; - wiphy->available_antennas_rx = mac->macinfo.num_rx_chain; + wiphy->available_antennas_tx = macinfo->num_tx_chain; + wiphy->available_antennas_rx = macinfo->num_rx_chain; - 
wiphy->max_ap_assoc_sta = mac->macinfo.max_ap_assoc_sta; - wiphy->ht_capa_mod_mask = &mac->macinfo.ht_cap_mod_mask; - wiphy->vht_capa_mod_mask = &mac->macinfo.vht_cap_mod_mask; + wiphy->max_ap_assoc_sta = macinfo->max_ap_assoc_sta; + wiphy->ht_capa_mod_mask = &macinfo->ht_cap_mod_mask; + wiphy->vht_capa_mod_mask = &macinfo->vht_cap_mod_mask; ether_addr_copy(wiphy->perm_addr, mac->macaddr); -- cgit v1.2.3 From 36e8c538b374d07b69961cb1980f8bb39061b822 Mon Sep 17 00:00:00 2001 From: Igor Mitsyanko Date: Tue, 29 May 2018 14:59:59 +0300 Subject: qtnfmac: decode error codes from firmware replies Introduce a function that will map an error code reported in reply to a firmware command, into one of standard errno codes. Use additional error codes to improve error reporting for MAC address changes. Signed-off-by: Igor Mitsyanko Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/commands.c | 26 +++++++++++++++++++++-- drivers/net/wireless/quantenna/qtnfmac/qlink.h | 2 ++ 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index deca0060eb27..9dc4560be5d8 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -55,6 +55,28 @@ static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp, return 0; } +static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode) +{ + switch (qcode) { + case QLINK_CMD_RESULT_OK: + return 0; + case QLINK_CMD_RESULT_INVALID: + return -EINVAL; + case QLINK_CMD_RESULT_ENOTSUPP: + return -ENOTSUPP; + case QLINK_CMD_RESULT_ENOTFOUND: + return -ENOENT; + case QLINK_CMD_RESULT_EALREADY: + return -EALREADY; + case QLINK_CMD_RESULT_EADDRINUSE: + return -EADDRINUSE; + case QLINK_CMD_RESULT_EADDRNOTAVAIL: + return -EADDRNOTAVAIL; + default: + return -EFAULT; + } +} + static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, struct sk_buff *cmd_skb, struct sk_buff **response_skb, @@ -810,10 +832,10 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, if (unlikely(ret)) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { + ret = qtnf_cmd_resp_result_decode(res_code); + if (ret) { pr_err("VIF%u.%u: CMD %d failed: %u\n", vif->mac->macid, vif->vifid, cmd_type, res_code); - ret = -EFAULT; goto out; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 9ab27e158023..f85deda703fb 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -674,6 +674,8 @@ enum qlink_cmd_result { QLINK_CMD_RESULT_ENOTSUPP, QLINK_CMD_RESULT_ENOTFOUND, QLINK_CMD_RESULT_EALREADY, + QLINK_CMD_RESULT_EADDRINUSE, + QLINK_CMD_RESULT_EADDRNOTAVAIL, }; /** -- cgit v1.2.3 From 9a3beeb5b73a55d8d4ada9cf17bec24ad1f31394 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 29 May 2018 15:00:00 +0300 Subject: qtnfmac: cleanup wdev structure between its uses Driver uses statically allocated wdev structures for each virtual interface. However wdev structure is not properly cleaned up between its uses. As a result, various bugs appear when userspace tools like hostapd were not gracefully stopped. 
In particular, this commit fixes the following issue: - start hostapd with more than 2 mBSS - kill hostapd using SIGKILL - start again hostapd with more than 2 mBSS However only two mBSS entities will be started: primary and the last BSS listed in hostapd config. Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 8 +++----- drivers/net/wireless/quantenna/qtnfmac/core.c | 1 - 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index bf624d975953..2089cb095283 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -177,8 +177,6 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) vif->netdev->ieee80211_ptr = NULL; vif->netdev = NULL; vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - eth_zero_addr(vif->mac_addr); - eth_zero_addr(vif->bssid); return 0; } @@ -216,10 +214,12 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, } eth_zero_addr(vif->mac_addr); + eth_zero_addr(vif->bssid); vif->bss_priority = QTNF_DEF_BSS_PRIORITY; + vif->sta_state = QTNF_STA_DISCONNECTED; + memset(&vif->wdev, 0, sizeof(vif->wdev)); vif->wdev.wiphy = wiphy; vif->wdev.iftype = type; - vif->sta_state = QTNF_STA_DISCONNECTED; break; default: pr_err("MAC%u: unsupported IF type %d\n", mac->macid, type); @@ -255,8 +255,6 @@ err_mac: qtnf_cmd_send_del_intf(vif); err_cmd: vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - eth_zero_addr(vif->mac_addr); - eth_zero_addr(vif->bssid); return ERR_PTR(-EFAULT); } diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index b3bfb4faa918..3ccbc427cf56 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -394,7 +394,6 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif, dev = alloc_netdev_mqs(sizeof(struct qtnf_vif *), name, name_assign_type, ether_setup, 1, 1); if (!dev) { - memset(&vif->wdev, 0, sizeof(vif->wdev)); vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; return -ENOMEM; } -- cgit v1.2.3 From 9e33e7fb47728cb53dbba09415d95d3ce0ea1ce4 Mon Sep 17 00:00:00 2001 From: Dmitry Lebed Date: Tue, 29 May 2018 15:00:01 +0300 Subject: qtnfmac: improve control path timeout handling Control path will not be operational after firmware failure. Change bus state to QTNF_FW_STATE_EP_DEAD after the control path timeout. Don't wait for timeout if control path is already dead. 
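A minimal generic-C sketch of the state handling described above (illustrative names, not the actual qtnfmac structures): a control-path send that times out marks the endpoint dead, and later sends bail out immediately instead of waiting for another timeout.

#include <errno.h>
#include <stddef.h>

enum fw_state { FW_STATE_ACTIVE, FW_STATE_EP_DEAD };

static enum fw_state fw_state = FW_STATE_ACTIVE;

/* Pretend transport call; returns -ETIMEDOUT once the endpoint has died. */
static int ipc_send(const void *buf, size_t len)
{
	(void)buf; (void)len;
	return -ETIMEDOUT;
}

static int control_tx(const void *buf, size_t len)
{
	int ret;

	if (fw_state != FW_STATE_ACTIVE)
		return -ENODEV;			/* don't wait for another timeout */

	ret = ipc_send(buf, len);
	if (ret == -ETIMEDOUT)
		fw_state = FW_STATE_EP_DEAD;	/* remember that the endpoint is gone */

	return ret;
}

int main(void)
{
	char msg[4] = { 0 };

	return control_tx(msg, sizeof(msg)) == -ETIMEDOUT &&
	       control_tx(msg, sizeof(msg)) == -ENODEV ? 0 : 1;
}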
Signed-off-by: Dmitry Lebed Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/bus.h | 3 ++- drivers/net/wireless/quantenna/qtnfmac/core.c | 2 +- drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 15 ++++++++++++--- 3 files changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index 0a1604683bab..323e47cea1e2 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -27,7 +27,8 @@ enum qtnf_fw_state { QTNF_FW_STATE_FW_DNLD_DONE, QTNF_FW_STATE_BOOT_DONE, QTNF_FW_STATE_ACTIVE, - QTNF_FW_STATE_DEAD, + QTNF_FW_STATE_DETACHED, + QTNF_FW_STATE_EP_DEAD, }; struct qtnf_bus; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 3ccbc427cf56..a6a450984f9a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -628,7 +628,7 @@ void qtnf_core_detach(struct qtnf_bus *bus) if (bus->fw_state == QTNF_FW_STATE_ACTIVE) qtnf_cmd_send_deinit_fw(bus); - bus->fw_state = QTNF_FW_STATE_DEAD; + bus->fw_state = QTNF_FW_STATE_DETACHED; if (bus->workqueue) { flush_workqueue(bus->workqueue); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 6c1e139bb8f7..3120d49df565 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -751,8 +751,16 @@ tx_done: static int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb) { struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); + int ret; + + ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len); - return qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len); + if (ret == -ETIMEDOUT) { + pr_err("EP firmware is dead\n"); + bus->fw_state = QTNF_FW_STATE_EP_DEAD; + } + + return ret; } static irqreturn_t qtnf_interrupt(int irq, void *data) @@ -1238,7 +1246,7 @@ static void qtnf_fw_work_handler(struct work_struct *work) goto fw_load_exit; fw_load_fail: - bus->fw_state = QTNF_FW_STATE_DEAD; + bus->fw_state = QTNF_FW_STATE_DETACHED; fw_load_exit: complete(&bus->firmware_init_complete); @@ -1408,7 +1416,8 @@ static void qtnf_pcie_remove(struct pci_dev *pdev) wait_for_completion(&bus->firmware_init_complete); - if (bus->fw_state == QTNF_FW_STATE_ACTIVE) + if (bus->fw_state == QTNF_FW_STATE_ACTIVE || + bus->fw_state == QTNF_FW_STATE_EP_DEAD) qtnf_core_detach(bus); priv = get_bus_priv(bus); -- cgit v1.2.3 From b60769e2dff0b168dfe3f03fe0d1950963c3c3da Mon Sep 17 00:00:00 2001 From: Dmitry Lebed Date: Tue, 29 May 2018 15:00:02 +0300 Subject: qtnfmac: fix firmware command error path Free command skb if bus state is not QTNF_FW_STATE_ACTIVE. 
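For illustration, a generic-C reduction of the leak pattern being fixed (made-up names, not the qtnfmac API): a buffer allocated for a command must be released on every early-return error path, not only handed off on the success path.

#include <errno.h>
#include <stdlib.h>

struct cmd_buf { char data[64]; };

enum fw_state { FW_STATE_ACTIVE, FW_STATE_DEAD };

static int send_cmd(enum fw_state state)
{
	struct cmd_buf *buf = malloc(sizeof(*buf));

	if (!buf)
		return -ENOMEM;

	if (state != FW_STATE_ACTIVE) {
		free(buf);		/* the bug was returning without this */
		return -ENODEV;
	}

	/* ... normally the transport consumes and later frees the buffer ... */
	free(buf);
	return 0;
}

int main(void)
{
	return send_cmd(FW_STATE_DEAD) == -ENODEV ? 0 : 1;
}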
Signed-off-by: Dmitry Lebed Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/commands.c | 1 + drivers/net/wireless/quantenna/qtnfmac/trans.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 9dc4560be5d8..e2fc57be1cdd 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -102,6 +102,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n", mac_id, vif_id, le16_to_cpu(cmd->cmd_id), bus->fw_state); + dev_kfree_skb(cmd_skb); return -ENODEV; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.c b/drivers/net/wireless/quantenna/qtnfmac/trans.c index ccddfebc508a..345f34ec9750 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/trans.c +++ b/drivers/net/wireless/quantenna/qtnfmac/trans.c @@ -35,8 +35,10 @@ int qtnf_trans_send_cmd_with_resp(struct qtnf_bus *bus, struct sk_buff *cmd_skb, bool resp_not_handled = true; struct sk_buff *resp_skb = NULL; - if (unlikely(!response_skb)) + if (unlikely(!response_skb)) { + dev_kfree_skb(cmd_skb); return -EFAULT; + } spin_lock(&ctl_node->resp_lock); ctl_node->seq_num++; -- cgit v1.2.3 From f5d2ff43b9ed9f56e7ff67640aa593fcb8fa56c1 Mon Sep 17 00:00:00 2001 From: Andrey Shevchenko Date: Tue, 29 May 2018 15:00:03 +0300 Subject: qtnfmac: fix bg_scan_period parameter processing Do not process bg_scan_period parameter in qtnfmac driver. Pass correct values as is. In the case of invalid values pass default value. Leave further processing to firmware. Signed-off-by: Andrey Shevchenko Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/commands.c | 8 +++----- drivers/net/wireless/quantenna/qtnfmac/core.h | 2 -- 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index e2fc57be1cdd..5eb143667539 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -2339,13 +2339,11 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, else eth_zero_addr(cmd->prev_bssid); - if ((sme->bg_scan_period > 0) && - (sme->bg_scan_period <= QTNF_MAX_BG_SCAN_PERIOD)) + if ((sme->bg_scan_period >= 0) && + (sme->bg_scan_period <= SHRT_MAX)) cmd->bg_scan_period = cpu_to_le16(sme->bg_scan_period); - else if (sme->bg_scan_period == -1) - cmd->bg_scan_period = cpu_to_le16(QTNF_DEFAULT_BG_SCAN_PERIOD); else - cmd->bg_scan_period = 0; /* disabled */ + cmd->bg_scan_period = cpu_to_le16(-1); /* use default value */ if (sme->flags & ASSOC_REQ_DISABLE_HT) connect_flags |= QLINK_STA_CONNECT_DISABLE_HT; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 3b884c80b6ab..214435448335 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -44,8 +44,6 @@ #define QTNF_MAX_VSIE_LEN 255 #define QTNF_MAX_INTF 8 #define QTNF_MAX_EVENT_QUEUE_LEN 255 -#define QTNF_DEFAULT_BG_SCAN_PERIOD 300 -#define QTNF_MAX_BG_SCAN_PERIOD 0xffff #define QTNF_SCAN_TIMEOUT_SEC 15 #define QTNF_DEF_BSS_PRIORITY 0 -- cgit v1.2.3 From 40d68dbb986d2116c15e70f63c419a5481d7b91c Mon Sep 17 00:00:00 2001 From: Andrey Shevchenko Date: Tue, 29 May 2018 15:00:04 +0300 Subject: qtnfmac: cancel scan on disconnect 
Cancel scan operation on STA disconnect. Signed-off-by: Andrey Shevchenko Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 2089cb095283..1fcd94bf7c59 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -660,6 +660,8 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, if (vif->wdev.iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; + qtnf_scan_done(mac, true); + if (vif->sta_state == QTNF_STA_DISCONNECTED) return 0; -- cgit v1.2.3 From 480daa9cb62c14bbd1b87a01cd9bc10cc56dbf32 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 29 May 2018 15:00:05 +0300 Subject: qtnfmac: fix invalid STA state on EAPOL failure Driver switches vif sta_state into QTNF_STA_CONNECTING when cfg80211 core initiates connect procedure. Further this state is changed either to QTNF_STA_CONNECTED or to QTNF_STA_DISCONNECTED by BSS_JOIN and BSS_LEAVE events from firmware. However it is possible that no such events will be sent by firmware, e.g. if EAPOL timed out. In this case vif sta_mode will remain in QTNF_STA_CONNECTING state and all subsequent connection attempts will fail with -EBUSY error code. Fix this by perfroming STA state transition from QTNF_STA_CONNECTING to QTNF_STA_DISCONNECTED in cfg80211 disconnect callback. No need to rely upon firmware events in this case. Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 21 ++++++++++++++------- drivers/net/wireless/quantenna/qtnfmac/event.c | 8 +++----- 2 files changed, 17 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 1fcd94bf7c59..220e2b710208 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -649,30 +649,37 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, { struct qtnf_wmac *mac = wiphy_priv(wiphy); struct qtnf_vif *vif; - int ret; + int ret = 0; vif = qtnf_mac_get_base_vif(mac); if (!vif) { pr_err("MAC%u: primary VIF is not configured\n", mac->macid); - return -EFAULT; + ret = -EFAULT; + goto out; } - if (vif->wdev.iftype != NL80211_IFTYPE_STATION) - return -EOPNOTSUPP; + if (vif->wdev.iftype != NL80211_IFTYPE_STATION) { + ret = -EOPNOTSUPP; + goto out; + } qtnf_scan_done(mac, true); if (vif->sta_state == QTNF_STA_DISCONNECTED) - return 0; + goto out; ret = qtnf_cmd_send_disconnect(vif, reason_code); if (ret) { pr_err("VIF%u.%u: failed to disconnect\n", mac->macid, vif->vifid); - return ret; + goto out; } - return 0; +out: + if (vif->sta_state == QTNF_STA_CONNECTING) + vif->sta_state = QTNF_STA_DISCONNECTED; + + return ret; } static int diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 16617c44f81b..68da81bec4e9 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -211,11 +211,9 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif, return -EPROTO; } - if (vif->sta_state != QTNF_STA_CONNECTED) { - pr_err("VIF%u.%u: BSS_LEAVE event when STA is not connected\n", - vif->mac->macid, vif->vifid); - return -EPROTO; - } + if (vif->sta_state != 
QTNF_STA_CONNECTED) + pr_warn("VIF%u.%u: BSS_LEAVE event when STA is not connected\n", + vif->mac->macid, vif->vifid); pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid); -- cgit v1.2.3 From 32d26a685c1802a0e485bd674e7dd038e88019f7 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Tue, 29 May 2018 02:31:24 -0700 Subject: qed*: Add link change count value to ethtool statistics display. This patch adds driver changes for capturing the link change count in ethtool statistics display. Please consider applying this to "net-next". Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 12 ++++++++++-- drivers/net/ethernet/qlogic/qede/qede.h | 1 + drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 2 ++ drivers/net/ethernet/qlogic/qede/qede_main.c | 1 + include/linux/qed/qed_if.h | 1 + 5 files changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 1c0d0c217936..eed4725761f5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1854,6 +1854,11 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, p_ah->tx_1519_to_max_byte_packets = port_stats.eth.u1.ah1.t1519_to_max; } + + p_common->link_change_count = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + link_change_count)); } static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, @@ -1961,11 +1966,14 @@ void qed_reset_vport_stats(struct qed_dev *cdev) /* PORT statistics are not necessarily reset, so we need to * read and create a baseline for future statistics. + * Link change stat is maintained by MFW, return its value as is. 
*/ - if (!cdev->reset_stats) + if (!cdev->reset_stats) { DP_INFO(cdev, "Reset stats not allocated\n"); - else + } else { _qed_get_vport_stats(cdev, cdev->reset_stats); + cdev->reset_stats->common.link_change_count = 0; + } } static enum gft_profile_type diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 81c5c8dfa2ef..d7ed0d3dbf71 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -88,6 +88,7 @@ struct qede_stats_common { u64 coalesced_aborts_num; u64 non_coalesced_pkts; u64 coalesced_bytes; + u64 link_change_count; /* port */ u64 rx_64_byte_packets; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 6906e04b609e..f4a0f8ff8261 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -171,6 +171,8 @@ static const struct { QEDE_STAT(coalesced_aborts_num), QEDE_STAT(non_coalesced_pkts), QEDE_STAT(coalesced_bytes), + + QEDE_STAT(link_change_count), }; #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index d118771e1a7b..6a796040a32c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -399,6 +399,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) p_common->brb_truncates = stats.common.brb_truncates; p_common->brb_discards = stats.common.brb_discards; p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; + p_common->link_change_count = stats.common.link_change_count; if (QEDE_IS_BB(edev)) { struct qede_stats_bb *p_bb = &edev->stats.bb; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index ac991a3b8f03..b4040023cbfb 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1180,6 +1180,7 @@ struct qed_eth_stats_common { u64 tx_mac_mc_packets; u64 tx_mac_bc_packets; u64 tx_mac_ctrl_frames; + u64 link_change_count; }; struct qed_eth_stats_bb { -- cgit v1.2.3 From ea4721751977e14bf2ba5bf6c764afb4c21354dd Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 30 May 2018 02:57:46 +0200 Subject: mlxsw: spectrum_switchdev: Ignore bridge VLAN events A follow-up patch enables emitting VLAN notifications for the bridge CPU port in addition to the existing slave port notifications. These notifications have orig_dev set to the bridge in question. Because there's no specific support for these VLANs, just ignore the notifications to maintain the current behavior. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 8c9cf8ee9398..cbc8fabc90c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1144,6 +1144,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; + if (netif_is_bridge_master(orig_dev)) + return -EOPNOTSUPP; + if (switchdev_trans_ph_prepare(trans)) return 0; @@ -1741,6 +1744,9 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; + if (netif_is_bridge_master(orig_dev)) + return -EOPNOTSUPP; + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); if (WARN_ON(!bridge_port)) return -EINVAL; -- cgit v1.2.3 From 2855118fdafa753d284b80abd7835a530b5a6fcc Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 30 May 2018 02:58:36 +0200 Subject: rocker: rocker_main: Ignore bridge VLAN events A follow-up patch enables emitting VLAN notifications for the bridge CPU port in addition to the existing slave port notifications. These notifications have orig_dev set to the bridge in question. Because there's no specific support for these VLANs, just ignore the notifications to maintain the current behavior. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker_main.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index e73e4febeedb..aeafdb9ac015 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1632,6 +1632,9 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, { struct rocker_world_ops *wops = rocker_port->rocker->wops; + if (netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + if (!wops->port_obj_vlan_add) return -EOPNOTSUPP; @@ -1647,6 +1650,9 @@ rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port, { struct rocker_world_ops *wops = rocker_port->rocker->wops; + if (netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + if (!wops->port_obj_vlan_del) return -EOPNOTSUPP; return wops->port_obj_vlan_del(rocker_port, vlan); -- cgit v1.2.3 From 7edcb8ecbe6a285fc4608c22ad5d5c35b8809ff7 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 30 May 2018 03:00:49 +0200 Subject: mlxsw: spectrum_switchdev: Schedule respin during trans prepare Since there's no special support for the bridge events, the driver returns -EOPNOTSUPP, and thus the commit never happens. Therefore schedule respin during the prepare stage: there's no real difference one way or another. This fixes the problem that mirror-to-gretap offload wouldn't adapt to changes in bridge vlan configuration right away and another notification would have to arrive for mlxsw to catch up. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index cbc8fabc90c5..8a15ac49cb5a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1697,7 +1697,7 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans); - if (switchdev_trans_ph_commit(trans)) { + if (switchdev_trans_ph_prepare(trans)) { /* The event is emitted before the changes are actually * applied to the bridge. Therefore schedule the respin * call for later, so that the respin logic sees the -- cgit v1.2.3 From c5f732d7deb81a3c76c6d9b1045da800b22b196b Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 30 May 2018 17:15:50 +0530 Subject: cxgb4: Add FORCE_PAUSE bit to 32 bit port caps Add FORCE_PAUSE bit to force local pause settings instead of using auto negotiated values. Signed-off-by: Santosh Rastapur Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 10 +++++++++- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 5 +++-- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 39da7e3c804b..974a868a4824 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3941,6 +3941,7 @@ static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) CAP16_TO_CAP32(FC_RX); CAP16_TO_CAP32(FC_TX); CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(FORCE_PAUSE); CAP16_TO_CAP32(MDIAUTO); CAP16_TO_CAP32(MDISTRAIGHT); CAP16_TO_CAP32(FEC_RS); @@ -3982,6 +3983,7 @@ static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) CAP32_TO_CAP16(802_3_PAUSE); CAP32_TO_CAP16(802_3_ASM_DIR); CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(FORCE_PAUSE); CAP32_TO_CAP16(MDIAUTO); CAP32_TO_CAP16(MDISTRAIGHT); CAP32_TO_CAP16(FEC_RS); @@ -4014,6 +4016,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) fw_pause |= FW_PORT_CAP32_FC_RX; if (cc_pause & PAUSE_TX) fw_pause |= FW_PORT_CAP32_FC_TX; + if (!(cc_pause & PAUSE_AUTONEG)) + fw_pause |= FW_PORT_CAP32_FORCE_PAUSE; return fw_pause; } @@ -4101,7 +4105,11 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; } - if (rcap & ~lc->pcaps) { + /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so + * we need to exclude this from this check in order to maintain + * compatibility ... 
+ */ + if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) { dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n", rcap, lc->pcaps); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 2d91480a5a0e..f1967cf6d43c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2475,7 +2475,7 @@ enum fw_port_cap { FW_PORT_CAP_MDISTRAIGHT = 0x0400, FW_PORT_CAP_FEC_RS = 0x0800, FW_PORT_CAP_FEC_BASER_RS = 0x1000, - FW_PORT_CAP_FEC_RESERVED = 0x2000, + FW_PORT_CAP_FORCE_PAUSE = 0x2000, FW_PORT_CAP_802_3_PAUSE = 0x4000, FW_PORT_CAP_802_3_ASM_DIR = 0x8000, }; @@ -2522,7 +2522,8 @@ enum fw_port_mdi { #define FW_PORT_CAP32_FEC_RESERVED1 0x02000000UL #define FW_PORT_CAP32_FEC_RESERVED2 0x04000000UL #define FW_PORT_CAP32_FEC_RESERVED3 0x08000000UL -#define FW_PORT_CAP32_RESERVED2 0xf0000000UL +#define FW_PORT_CAP32_FORCE_PAUSE 0x10000000UL +#define FW_PORT_CAP32_RESERVED2 0xe0000000UL #define FW_PORT_CAP32_SPEED_S 0 #define FW_PORT_CAP32_SPEED_M 0xfff -- cgit v1.2.3 From 1865ea9adbfaf341c5cd5d8f7d384f19948b2fe9 Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Wed, 30 May 2018 10:59:49 -0700 Subject: net/mlx5: Add temperature warning event to log Temperature warning event is sent by FW to indicate high temperature as detected by one of the sensors on the board. Add handling of this event by writing the numbers of the alert sensors to the kernel log. Signed-off-by: Ilan Tayari Signed-off-by: Adi Nissim Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 23 +++++++++++++++++++++++ include/linux/mlx5/device.h | 7 +++++++ include/linux/mlx5/mlx5_ifc.h | 2 +- 3 files changed, 31 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 1814f803bd2c..1a3a2b9a7232 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -144,6 +144,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_GPIO_EVENT"; case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT"; + case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: + return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT"; case MLX5_EVENT_TYPE_REMOTE_CONFIG: return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; case MLX5_EVENT_TYPE_DB_BF_CONGESTION: @@ -396,6 +398,20 @@ static void general_event_handler(struct mlx5_core_dev *dev, } } +static void mlx5_temp_warning_event(struct mlx5_core_dev *dev, + struct mlx5_eqe *eqe) +{ + u64 value_lsb; + u64 value_msb; + + value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); + value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); + + mlx5_core_warn(dev, + "High temperature on sensors with bit set %llx %llx", + value_msb, value_lsb); +} + /* caller must eventually call mlx5_cq_put on the returned cq */ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) { @@ -550,6 +566,10 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) mlx5_fpga_event(dev, eqe->type, &eqe->data.raw); break; + case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: + mlx5_temp_warning_event(dev, eqe); + break; + case MLX5_EVENT_TYPE_GENERAL_EVENT: general_event_handler(dev, eqe); break; @@ -827,6 +847,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED); + if (MLX5_CAP_GEN(dev, 
temp_warn_event)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index db0332a6d23c..c1095e08f0c8 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -314,6 +314,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, + MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, MLX5_EVENT_TYPE_PPS_EVENT = 0x25, @@ -626,6 +627,11 @@ struct mlx5_eqe_dct { __be32 dctn; }; +struct mlx5_eqe_temp_warning { + __be64 sensor_warning_msb; + __be64 sensor_warning_lsb; +} __packed; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -642,6 +648,7 @@ union ev_data { struct mlx5_eqe_port_module port_module; struct mlx5_eqe_pps pps; struct mlx5_eqe_dct dct; + struct mlx5_eqe_temp_warning temp_warning; } __packed; struct mlx5_eqe { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 05b480fae27d..ca6c0dfb5ffe 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -912,7 +912,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_msg[0x5]; u8 reserved_at_1c8[0x4]; u8 max_tc[0x4]; - u8 reserved_at_1d0[0x1]; + u8 temp_warn_event[0x1]; u8 dcbx[0x1]; u8 general_notification_event[0x1]; u8 reserved_at_1d3[0x2]; -- cgit v1.2.3 From 1f0cf89b09305743fca5898660a7be83aab38a74 Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Wed, 30 May 2018 10:59:50 -0700 Subject: net/mlx5: Add FPGA QP error event The FPGA queue pair (QP) event fires whenever a QP on the FPGA transitions to the error state. At this stage, this event is unrecoverable, it may become recoverable in the future. Signed-off-by: Ilan Tayari Signed-off-by: Adi Nissim Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 7 +++++-- include/linux/mlx5/device.h | 1 + include/linux/mlx5/mlx5_ifc.h | 1 + include/linux/mlx5/mlx5_ifc_fpga.h | 16 ++++++++++++++++ 4 files changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 1a3a2b9a7232..406c23862f5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -164,6 +164,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE"; case MLX5_EVENT_TYPE_FPGA_ERROR: return "MLX5_EVENT_TYPE_FPGA_ERROR"; + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: + return "MLX5_EVENT_TYPE_FPGA_QP_ERROR"; case MLX5_EVENT_TYPE_GENERAL_EVENT: return "MLX5_EVENT_TYPE_GENERAL_EVENT"; default: @@ -563,6 +565,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) break; case MLX5_EVENT_TYPE_FPGA_ERROR: + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: mlx5_fpga_event(dev, eqe->type, &eqe->data.raw); break; @@ -842,11 +845,11 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); if (MLX5_CAP_GEN(dev, fpga)) - async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR); + async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) | + (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR); if (MLX5_CAP_GEN_MAX(dev, dct)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED); - if (MLX5_CAP_GEN(dev, temp_warn_event)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index c1095e08f0c8..0f006cf8343d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -331,6 +331,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, + MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, }; enum { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index ca6c0dfb5ffe..8e0b8865f91e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -60,6 +60,7 @@ enum { MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb, MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20, + MLX5_EVENT_TYPE_CODING_FPGA_QP_ERROR = 0x21 }; enum { diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index 193091537cb6..64d0f40d4cc3 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -470,6 +470,22 @@ struct mlx5_ifc_ipsec_counters_bits { u8 dropped_cmd[0x40]; }; +enum { + MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1, + MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2, +}; + +struct mlx5_ifc_fpga_qp_error_event_bits { + u8 reserved_at_0[0x40]; + + u8 reserved_at_40[0x18]; + u8 syndrome[0x8]; + + u8 reserved_at_60[0x60]; + + u8 reserved_at_c0[0x8]; + u8 fpga_qpn[0x18]; +}; enum mlx5_ifc_fpga_ipsec_response_syndrome { MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0, MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, -- cgit v1.2.3 From e2b3e493784fe07eac385611acbaaf7bff86c496 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 30 May 2018 23:51:54 +0200 Subject: net: ti: cpsw: include gpio/consumer.h On platforms that don't always enable CONFIG_GPIOLIB, we run into a build failure: drivers/net/ethernet/ti/cpsw.c: In function 'cpsw_probe': drivers/net/ethernet/ti/cpsw.c:3006:9: error: implicit declaration of function 'devm_gpiod_get_array_optional' 
[-Werror=implicit-function-declaration] mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/net/ethernet/ti/cpsw.c:3006:59: error: 'GPIOD_OUT_LOW' undeclared (first use in this function); did you mean 'GPIOF_INIT_LOW'? mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW); Since we cannot rely on this to be visible from gpio.h, we have to include gpio/consumer.h directly. Fixes: 2652113ff043 ("net: ethernet: ti: Allow most drivers with COMPILE_TEST") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 643cd2d9dfb6..534596ce00d3 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From 7bb8c9969d919517fc379cacebd8fa93704173db Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 31 May 2018 00:15:42 +0200 Subject: net: dsa: mv88e6xxx: Be explicit about DT or pdata Make it explicit that either device tree is used or platform data. If neither is available, abort the probe. Reported-by: Dan Carpenter Fixes: 877b7cb0b6f2 ("net: dsa: mv88e6xxx: Add minimal platform_data support") Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 12df00f593b7..437cd6eb4faa 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -4389,6 +4389,9 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) int port; int err; + if (!np && !pdata) + return -EINVAL; + if (np) compat_info = of_device_get_match_data(dev); -- cgit v1.2.3 From 4b8e6ac41a594ea67ded6af6af5935f03221ea4c Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 31 May 2018 02:05:07 +0000 Subject: virtio_net: fix error return code in virtnet_probe() Fix to return a negative error code from the failover create fail error handling case instead of 0, as done elsewhere in this function. Fixes: ba5e4426e80e ("virtio_net: Extend virtio to use VF datapath when available") Signed-off-by: Wei Yongjun Acked-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8f08a3e1bbaa..2d55e2af7cb6 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2935,8 +2935,10 @@ static int virtnet_probe(struct virtio_device *vdev) if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) { vi->failover = net_failover_create(vi->dev); - if (IS_ERR(vi->failover)) + if (IS_ERR(vi->failover)) { + err = PTR_ERR(vi->failover); goto free_vqs; + } } err = register_netdev(dev); -- cgit v1.2.3 From db9d7d36eecc8926f03a8f2e46781887577b3353 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 31 May 2018 10:07:43 +0200 Subject: net: mvpp2: Split the PPv2 driver to a dedicated directory As the mvpp2 driver is growing, move this driver to a dedicated directory and split it into several files. 
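For reference, a split like this is typically wired up by a small Kbuild makefile inside the new sub-directory, so the module keeps its old binary name while being linked from several per-topic objects. The lines below are a minimal sketch of what drivers/net/ethernet/marvell/mvpp2/Makefile could contain; the created file's actual contents are not reproduced in this log, so the exact form (and the object list, taken from the new source files in the diffstat that follows) is an assumption, not a verbatim copy of the patch.

#
# Sketch of a Kbuild makefile for the split driver (assumed contents).
#
# Keep the module binary named "mvpp2" ...
obj-$(CONFIG_MVPP2) := mvpp2.o

# ... and link it from the new per-topic objects.
mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o
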
Since this driver has a lot of register defines and structure definitions, it can benefit from having all of this in a dedicated header file, named mvpp2.h. A good chunk of the mvpp2 code is dedicated to Header Parser handling, so we introduce mvpp2_prs.h where all Header Parser definitions are located, and mvpp2_prs.c containing the related code. In the same way, mvpp2_cls.h and mvpp2_cls.c are created to contain Classifier and RSS related code. The former 'mvpp2.c' file is renamed 'mvpp2_main.c' so that we can keep the driver binary named 'mvpp2'. This commit is only about splitting the driver into multiple files and doesn't introduce any new function, feature or fix besides removing 'static' keywords when needed. Signed-off-by: Maxime Chevallier Tested-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/Makefile | 2 +- drivers/net/ethernet/marvell/mvpp2.c | 9159 ----------------------- drivers/net/ethernet/marvell/mvpp2/Makefile | 7 + drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 1046 +++ drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 141 + drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 44 + drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 5281 +++++++++++++ drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 2467 ++++++ drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 314 + 9 files changed, 9301 insertions(+), 9160 deletions(-) delete mode 100644 drivers/net/ethernet/marvell/mvpp2.c create mode 100644 drivers/net/ethernet/marvell/mvpp2/Makefile create mode 100644 drivers/net/ethernet/marvell/mvpp2/mvpp2.h create mode 100644 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c create mode 100644 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h create mode 100644 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c create mode 100644 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c create mode 100644 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 9498ed26dbe5..55d4d10aa7d3 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_MVMDIO) += mvmdio.o obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o obj-$(CONFIG_MVNETA) += mvneta.o -obj-$(CONFIG_MVPP2) += mvpp2.o +obj-$(CONFIG_MVPP2) += mvpp2/ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c deleted file mode 100644 index 6847cd431aa0..000000000000 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ /dev/null @@ -1,9159 +0,0 @@ -/* - * Driver for Marvell PPv2 network controller for Armada 375 SoC. - * - * Copyright (C) 2014 Marvell - * - * Marcin Wojtas - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied.
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Fifo Registers */ -#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) -#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) -#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 -#define MVPP2_RX_FIFO_INIT_REG 0x64 -#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port)) -#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) - -/* RX DMA Top Registers */ -#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) -#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) -#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) -#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) -#define MVPP2_POOL_BUF_SIZE_OFFSET 5 -#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) -#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff -#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) -#define MVPP2_RXQ_POOL_SHORT_OFFS 20 -#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 -#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 -#define MVPP2_RXQ_POOL_LONG_OFFS 24 -#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 -#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 -#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 -#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 -#define MVPP2_RXQ_DISABLE_MASK BIT(31) - -/* Top Registers */ -#define MVPP2_MH_REG(port) (0x5040 + 4 * (port)) -#define MVPP2_DSA_EXTENDED BIT(5) - -/* Parser Registers */ -#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 -#define MVPP2_PRS_PORT_LU_MAX 0xf -#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) -#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) -#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) -#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) -#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) -#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) -#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) -#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) -#define MVPP2_PRS_TCAM_IDX_REG 0x1100 -#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) -#define MVPP2_PRS_TCAM_INV_MASK BIT(31) -#define MVPP2_PRS_SRAM_IDX_REG 0x1200 -#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) -#define MVPP2_PRS_TCAM_CTRL_REG 0x1230 -#define MVPP2_PRS_TCAM_EN_MASK BIT(0) - -/* RSS Registers */ -#define MVPP22_RSS_INDEX 0x1500 -#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) -#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) -#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) -#define MVPP22_RSS_TABLE_ENTRY 0x1508 -#define MVPP22_RSS_TABLE 0x1510 -#define MVPP22_RSS_TABLE_POINTER(p) (p) -#define MVPP22_RSS_WIDTH 0x150c - -/* Classifier Registers */ -#define MVPP2_CLS_MODE_REG 0x1800 -#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) -#define MVPP2_CLS_PORT_WAY_REG 0x1810 -#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) -#define MVPP2_CLS_LKP_INDEX_REG 0x1814 -#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 -#define MVPP2_CLS_LKP_TBL_REG 0x1818 -#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff -#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) -#define MVPP2_CLS_FLOW_INDEX_REG 0x1820 -#define MVPP2_CLS_FLOW_TBL0_REG 0x1824 -#define MVPP2_CLS_FLOW_TBL1_REG 0x1828 -#define MVPP2_CLS_FLOW_TBL2_REG 0x182c -#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) -#define 
MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 -#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 -#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) -#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 -#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) - -/* Descriptor Manager Top Registers */ -#define MVPP2_RXQ_NUM_REG 0x2040 -#define MVPP2_RXQ_DESC_ADDR_REG 0x2044 -#define MVPP22_DESC_ADDR_OFFS 8 -#define MVPP2_RXQ_DESC_SIZE_REG 0x2048 -#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 -#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) -#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 -#define MVPP2_RXQ_NUM_NEW_OFFSET 16 -#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) -#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff -#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 -#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 -#define MVPP2_RXQ_THRESH_REG 0x204c -#define MVPP2_OCCUPIED_THRESH_OFFSET 0 -#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff -#define MVPP2_RXQ_INDEX_REG 0x2050 -#define MVPP2_TXQ_NUM_REG 0x2080 -#define MVPP2_TXQ_DESC_ADDR_REG 0x2084 -#define MVPP2_TXQ_DESC_SIZE_REG 0x2088 -#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 -#define MVPP2_TXQ_THRESH_REG 0x2094 -#define MVPP2_TXQ_THRESH_OFFSET 16 -#define MVPP2_TXQ_THRESH_MASK 0x3fff -#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 -#define MVPP2_TXQ_INDEX_REG 0x2098 -#define MVPP2_TXQ_PREF_BUF_REG 0x209c -#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) -#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) -#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) -#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) -#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) -#define MVPP2_TXQ_PENDING_REG 0x20a0 -#define MVPP2_TXQ_PENDING_MASK 0x3fff -#define MVPP2_TXQ_INT_STATUS_REG 0x20a4 -#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) -#define MVPP2_TRANSMITTED_COUNT_OFFSET 16 -#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 -#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 -#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 -#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 -#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff -#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 -#define MVPP2_TXQ_RSVD_CLR_OFFSET 16 -#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) -#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 -#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) -#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 -#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) -#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff -#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) - -/* MBUS bridge registers */ -#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) -#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) -#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) -#define MVPP2_BASE_ADDR_ENABLE 0x4060 - -/* AXI Bridge Registers */ -#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 -#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 -#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 -#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 -#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 -#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c -#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 -#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 -#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 -#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 -#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 -#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 - -/* Values for AXI Bridge registers */ -#define MVPP22_AXI_ATTR_CACHE_OFFS 0 -#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 - -#define MVPP22_AXI_CODE_CACHE_OFFS 0 -#define MVPP22_AXI_CODE_DOMAIN_OFFS 4 - -#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 -#define 
MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 -#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb - -#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 -#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 - -/* Interrupt Cause and Mask registers */ -#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) -#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 - -#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) -#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 -#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) - -#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 -#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 - -#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 - -#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 -#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 - -#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) -#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) -#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) -#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) -#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff -#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 -#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 -#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) -#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) -#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) -#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) -#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) -#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) -#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) -#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc -#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff -#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 -#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) -#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 - -/* Buffer Manager registers */ -#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) -#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 -#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) -#define MVPP2_BM_POOL_SIZE_MASK 0xfff0 -#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) -#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 -#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) -#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 -#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) -#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) -#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff -#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8 -#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) -#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) -#define MVPP2_BM_START_MASK BIT(0) -#define MVPP2_BM_STOP_MASK BIT(1) -#define MVPP2_BM_STATE_MASK BIT(4) -#define MVPP2_BM_LOW_THRESH_OFFS 8 -#define MVPP2_BM_LOW_THRESH_MASK 0x7f00 -#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ - MVPP2_BM_LOW_THRESH_OFFS) -#define MVPP2_BM_HIGH_THRESH_OFFS 16 -#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 -#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ - MVPP2_BM_HIGH_THRESH_OFFS) -#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) -#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) -#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) -#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) -#define MVPP2_BM_BPPE_FULL_MASK BIT(3) -#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) -#define 
MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) -#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) -#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) -#define MVPP2_BM_VIRT_ALLOC_REG 0x6440 -#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444 -#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff -#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00 -#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8 -#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) -#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) -#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) -#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) -#define MVPP2_BM_VIRT_RLS_REG 0x64c0 -#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 -#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff -#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 -#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 - -/* TX Scheduler registers */ -#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 -#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 -#define MVPP2_TXP_SCHED_ENQ_MASK 0xff -#define MVPP2_TXP_SCHED_DISQ_OFFSET 8 -#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 -#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 -#define MVPP2_TXP_SCHED_MTU_REG 0x801c -#define MVPP2_TXP_MTU_MAX 0x7FFFF -#define MVPP2_TXP_SCHED_REFILL_REG 0x8020 -#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff -#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 -#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) -#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 -#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff -#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) -#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff -#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 -#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) -#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) -#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff -#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) -#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff - -/* TX general registers */ -#define MVPP2_TX_SNOOP_REG 0x8800 -#define MVPP2_TX_PORT_FLUSH_REG 0x8810 -#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) - -/* LMS registers */ -#define MVPP2_SRC_ADDR_MIDDLE 0x24 -#define MVPP2_SRC_ADDR_HIGH 0x28 -#define MVPP2_PHY_AN_CFG0_REG 0x34 -#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) -#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c -#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 - -/* Per-port registers */ -#define MVPP2_GMAC_CTRL_0_REG 0x0 -#define MVPP2_GMAC_PORT_EN_MASK BIT(0) -#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) -#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 -#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc -#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) -#define MVPP2_GMAC_CTRL_1_REG 0x4 -#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) -#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) -#define MVPP2_GMAC_PCS_LB_EN_BIT 6 -#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) -#define MVPP2_GMAC_SA_LOW_OFFS 7 -#define MVPP2_GMAC_CTRL_2_REG 0x8 -#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) -#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) -#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) -#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) -#define MVPP2_GMAC_DISABLE_PADDING BIT(5) -#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) -#define MVPP2_GMAC_AUTONEG_CONFIG 0xc -#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) -#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) -#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) -#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) -#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4) -#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) -#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) -#define MVPP2_GMAC_AN_SPEED_EN BIT(7) -#define MVPP2_GMAC_FC_ADV_EN BIT(9) 
-#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10) -#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) -#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) -#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) -#define MVPP2_GMAC_STATUS0 0x10 -#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0) -#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1) -#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2) -#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3) -#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6) -#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7) -#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11) -#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c -#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ - MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) -#define MVPP22_GMAC_INT_STAT 0x20 -#define MVPP22_GMAC_INT_STAT_LINK BIT(1) -#define MVPP22_GMAC_INT_MASK 0x24 -#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1) -#define MVPP22_GMAC_CTRL_4_REG 0x90 -#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) -#define MVPP22_CTRL4_RX_FC_EN BIT(3) -#define MVPP22_CTRL4_TX_FC_EN BIT(4) -#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) -#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) -#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) -#define MVPP22_GMAC_INT_SUM_MASK 0xa4 -#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) - -/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, - * relative to port->base. - */ -#define MVPP22_XLG_CTRL0_REG 0x100 -#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) -#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) -#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) -#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8) -#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) -#define MVPP22_XLG_CTRL1_REG 0x104 -#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0 -#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff -#define MVPP22_XLG_STATUS 0x10c -#define MVPP22_XLG_STATUS_LINK_UP BIT(0) -#define MVPP22_XLG_INT_STAT 0x114 -#define MVPP22_XLG_INT_STAT_LINK BIT(1) -#define MVPP22_XLG_INT_MASK 0x118 -#define MVPP22_XLG_INT_MASK_LINK BIT(1) -#define MVPP22_XLG_CTRL3_REG 0x11c -#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) -#define MVPP22_XLG_EXT_INT_MASK 0x15c -#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1) -#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2) -#define MVPP22_XLG_CTRL4_REG 0x184 -#define MVPP22_XLG_CTRL4_FWD_FC BIT(5) -#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) -#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) -#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14) - -/* SMI registers. PPv2.2 only, relative to priv->iface_base. */ -#define MVPP22_SMI_MISC_CFG_REG 0x1204 -#define MVPP22_SMI_POLLING_EN BIT(10) - -#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) - -#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff - -/* Descriptor ring Macros */ -#define MVPP2_QUEUE_NEXT_DESC(q, index) \ - (((index) < (q)->last_desc) ? ((index) + 1) : 0) - -/* XPCS registers. PPv2.2 only */ -#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000) -#define MVPP22_MPCS_CTRL 0x14 -#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10) -#define MVPP22_MPCS_CLK_RESET 0x14c -#define MAC_CLK_RESET_SD_TX BIT(0) -#define MAC_CLK_RESET_SD_RX BIT(1) -#define MAC_CLK_RESET_MAC BIT(2) -#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) -#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11) - -/* XPCS registers. 
PPv2.2 only */ -#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) -#define MVPP22_XPCS_CFG0 0x0 -#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) -#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) - -/* System controller registers. Accessed through a regmap. */ -#define GENCONF_SOFT_RESET1 0x1108 -#define GENCONF_SOFT_RESET1_GOP BIT(6) -#define GENCONF_PORT_CTRL0 0x1110 -#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1) -#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29) -#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31) -#define GENCONF_PORT_CTRL1 0x1114 -#define GENCONF_PORT_CTRL1_EN(p) BIT(p) -#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28) -#define GENCONF_CTRL0 0x1120 -#define GENCONF_CTRL0_PORT0_RGMII BIT(0) -#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1) -#define GENCONF_CTRL0_PORT1_RGMII BIT(2) - -/* Various constants */ - -/* Coalescing */ -#define MVPP2_TXDONE_COAL_PKTS_THRESH 64 -#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL -#define MVPP2_TXDONE_COAL_USEC 1000 -#define MVPP2_RX_COAL_PKTS 32 -#define MVPP2_RX_COAL_USEC 64 - -/* The two bytes Marvell header. Either contains a special value used - * by Marvell switches when a specific hardware mode is enabled (not - * supported by this driver) or is filled automatically by zeroes on - * the RX side. Those two bytes being at the front of the Ethernet - * header, they allow to have the IP header aligned on a 4 bytes - * boundary automatically: the hardware skips those two bytes on its - * own. - */ -#define MVPP2_MH_SIZE 2 -#define MVPP2_ETH_TYPE_LEN 2 -#define MVPP2_PPPOE_HDR_SIZE 8 -#define MVPP2_VLAN_TAG_LEN 4 -#define MVPP2_VLAN_TAG_EDSA_LEN 8 - -/* Lbtd 802.3 type */ -#define MVPP2_IP_LBDT_TYPE 0xfffa - -#define MVPP2_TX_CSUM_MAX_SIZE 9800 - -/* Timeout constants */ -#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 -#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 - -#define MVPP2_TX_MTU_MAX 0x7ffff - -/* Maximum number of T-CONTs of PON port */ -#define MVPP2_MAX_TCONT 16 - -/* Maximum number of supported ports */ -#define MVPP2_MAX_PORTS 4 - -/* Maximum number of TXQs used by single port */ -#define MVPP2_MAX_TXQ 8 - -/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO - * skb. As we need a maxium of two descriptors per fragments (1 header, 1 data), - * multiply this value by two to count the maximum number of skb descs needed. 
- */ -#define MVPP2_MAX_TSO_SEGS 300 -#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) - -/* Dfault number of RXQs in use */ -#define MVPP2_DEFAULT_RXQ 4 - -/* Max number of Rx descriptors */ -#define MVPP2_MAX_RXD_MAX 1024 -#define MVPP2_MAX_RXD_DFLT 128 - -/* Max number of Tx descriptors */ -#define MVPP2_MAX_TXD_MAX 2048 -#define MVPP2_MAX_TXD_DFLT 1024 - -/* Amount of Tx descriptors that can be reserved at once by CPU */ -#define MVPP2_CPU_DESC_CHUNK 64 - -/* Max number of Tx descriptors in each aggregated queue */ -#define MVPP2_AGGR_TXQ_SIZE 256 - -/* Descriptor aligned size */ -#define MVPP2_DESC_ALIGNED_SIZE 32 - -/* Descriptor alignment mask */ -#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) - -/* RX FIFO constants */ -#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000 -#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000 -#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 -#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 - -/* TX FIFO constants */ -#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa -#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 -#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 -#define MVPP2_TX_FIFO_THRESHOLD_10KB \ - (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) -#define MVPP2_TX_FIFO_THRESHOLD_3KB \ - (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) - -/* RX buffer constants */ -#define MVPP2_SKB_SHINFO_SIZE \ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - -#define MVPP2_RX_PKT_SIZE(mtu) \ - ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ - ETH_HLEN + ETH_FCS_LEN, cache_line_size()) - -#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) -#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) -#define MVPP2_RX_MAX_PKT_SIZE(total_size) \ - ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) - -#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) - -/* IPv6 max L3 address size */ -#define MVPP2_MAX_L3_ADDR_SIZE 16 - -/* Port flags */ -#define MVPP2_F_LOOPBACK BIT(0) - -/* Marvell tag types */ -enum mvpp2_tag_type { - MVPP2_TAG_TYPE_NONE = 0, - MVPP2_TAG_TYPE_MH = 1, - MVPP2_TAG_TYPE_DSA = 2, - MVPP2_TAG_TYPE_EDSA = 3, - MVPP2_TAG_TYPE_VLAN = 4, - MVPP2_TAG_TYPE_LAST = 5 -}; - -/* Parser constants */ -#define MVPP2_PRS_TCAM_SRAM_SIZE 256 -#define MVPP2_PRS_TCAM_WORDS 6 -#define MVPP2_PRS_SRAM_WORDS 4 -#define MVPP2_PRS_FLOW_ID_SIZE 64 -#define MVPP2_PRS_FLOW_ID_MASK 0x3f -#define MVPP2_PRS_TCAM_ENTRY_INVALID 1 -#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) -#define MVPP2_PRS_IPV4_HEAD 0x40 -#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 -#define MVPP2_PRS_IPV4_MC 0xe0 -#define MVPP2_PRS_IPV4_MC_MASK 0xf0 -#define MVPP2_PRS_IPV4_BC_MASK 0xff -#define MVPP2_PRS_IPV4_IHL 0x5 -#define MVPP2_PRS_IPV4_IHL_MASK 0xf -#define MVPP2_PRS_IPV6_MC 0xff -#define MVPP2_PRS_IPV6_MC_MASK 0xff -#define MVPP2_PRS_IPV6_HOP_MASK 0xff -#define MVPP2_PRS_TCAM_PROTO_MASK 0xff -#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f -#define MVPP2_PRS_DBL_VLANS_MAX 100 -#define MVPP2_PRS_CAST_MASK BIT(0) -#define MVPP2_PRS_MCAST_VAL BIT(0) -#define MVPP2_PRS_UCAST_VAL 0x0 - -/* Tcam structure: - * - lookup ID - 4 bits - * - port ID - 1 byte - * - additional information - 1 byte - * - header data - 8 bytes - * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 
- */ -#define MVPP2_PRS_AI_BITS 8 -#define MVPP2_PRS_PORT_MASK 0xff -#define MVPP2_PRS_LU_MASK 0xf -#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ - (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) -#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ - (((offs) * 2) - ((offs) % 2) + 2) -#define MVPP2_PRS_TCAM_AI_BYTE 16 -#define MVPP2_PRS_TCAM_PORT_BYTE 17 -#define MVPP2_PRS_TCAM_LU_BYTE 20 -#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) -#define MVPP2_PRS_TCAM_INV_WORD 5 - -#define MVPP2_PRS_VID_TCAM_BYTE 2 - -/* TCAM range for unicast and multicast filtering. We have 25 entries per port, - * with 4 dedicated to UC filtering and the rest to multicast filtering. - * Additionnally we reserve one entry for the broadcast address, and one for - * each port's own address. - */ -#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25 -#define MVPP2_PRS_MAC_RANGE_SIZE 80 - -/* Number of entries per port dedicated to UC and MC filtering */ -#define MVPP2_PRS_MAC_UC_FILT_MAX 4 -#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \ - MVPP2_PRS_MAC_UC_FILT_MAX) - -/* There is a TCAM range reserved for VLAN filtering entries, range size is 33 - * 10 VLAN ID filter entries per port - * 1 default VLAN filter entry per port - * It is assumed that there are 3 ports for filter, not including loopback port - */ -#define MVPP2_PRS_VLAN_FILT_MAX 11 -#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33 - -#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2) -#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1) - -/* Tcam entries ID */ -#define MVPP2_PE_DROP_ALL 0 -#define MVPP2_PE_FIRST_FREE_TID 1 - -/* MAC filtering range */ -#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1) -#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \ - MVPP2_PRS_MAC_RANGE_SIZE + 1) -/* VLAN filtering range */ -#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) -#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ - MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) -#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) -#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) -#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) -#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) -#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27) -#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22) -#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21) -#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20) -#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19) -#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) -#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) -#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) -#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) -#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) -#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13) -#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12) -#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11) -#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10) -#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9) -#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8) -#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) -#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) -#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) -/* reserved */ -#define MVPP2_PE_MAC_MC_PROMISCUOUS 
(MVPP2_PRS_TCAM_SRAM_SIZE - 3) -#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) -#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) - -#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \ - ((port) * MVPP2_PRS_VLAN_FILT_MAX)) -#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ - + MVPP2_PRS_VLAN_FILT_MAX_ENTRY) -/* Index of default vid filter for given port */ -#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ - + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY) - -/* Sram structure - * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). - */ -#define MVPP2_PRS_SRAM_RI_OFFS 0 -#define MVPP2_PRS_SRAM_RI_WORD 0 -#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 -#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 -#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 -#define MVPP2_PRS_SRAM_SHIFT_OFFS 64 -#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 -#define MVPP2_PRS_SRAM_UDF_OFFS 73 -#define MVPP2_PRS_SRAM_UDF_BITS 8 -#define MVPP2_PRS_SRAM_UDF_MASK 0xff -#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 -#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 -#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 -#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 -#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 -#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 -#define MVPP2_PRS_SRAM_AI_OFFS 90 -#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 -#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 -#define MVPP2_PRS_SRAM_AI_MASK 0xff -#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 -#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf -#define MVPP2_PRS_SRAM_LU_DONE_BIT 110 -#define MVPP2_PRS_SRAM_LU_GEN_BIT 111 - -/* Sram result info bits assignment */ -#define MVPP2_PRS_RI_MAC_ME_MASK 0x1 -#define MVPP2_PRS_RI_DSA_MASK 0x2 -#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) -#define MVPP2_PRS_RI_VLAN_NONE 0x0 -#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) -#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) -#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) -#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 -#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) -#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) -#define MVPP2_PRS_RI_L2_UCAST 0x0 -#define MVPP2_PRS_RI_L2_MCAST BIT(9) -#define MVPP2_PRS_RI_L2_BCAST BIT(10) -#define MVPP2_PRS_RI_PPPOE_MASK 0x800 -#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) -#define MVPP2_PRS_RI_L3_UN 0x0 -#define MVPP2_PRS_RI_L3_IP4 BIT(12) -#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) -#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) -#define MVPP2_PRS_RI_L3_IP6 BIT(14) -#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) -#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) -#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) -#define MVPP2_PRS_RI_L3_UCAST 0x0 -#define MVPP2_PRS_RI_L3_MCAST BIT(15) -#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) -#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 -#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) -#define MVPP2_PRS_RI_UDF3_MASK 0x300000 -#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) -#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 -#define MVPP2_PRS_RI_L4_TCP BIT(22) -#define MVPP2_PRS_RI_L4_UDP BIT(23) -#define 
MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) -#define MVPP2_PRS_RI_UDF7_MASK 0x60000000 -#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) -#define MVPP2_PRS_RI_DROP_MASK 0x80000000 - -/* Sram additional info bits assignment */ -#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) -#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) -#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) -#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) -#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) -#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) -#define MVPP2_PRS_SINGLE_VLAN_AI 0 -#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) -#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0) - -/* DSA/EDSA type */ -#define MVPP2_PRS_TAGGED true -#define MVPP2_PRS_UNTAGGED false -#define MVPP2_PRS_EDSA true -#define MVPP2_PRS_DSA false - -/* MAC entries, shadow udf */ -enum mvpp2_prs_udf { - MVPP2_PRS_UDF_MAC_DEF, - MVPP2_PRS_UDF_MAC_RANGE, - MVPP2_PRS_UDF_L2_DEF, - MVPP2_PRS_UDF_L2_DEF_COPY, - MVPP2_PRS_UDF_L2_USER, -}; - -/* Lookup ID */ -enum mvpp2_prs_lookup { - MVPP2_PRS_LU_MH, - MVPP2_PRS_LU_MAC, - MVPP2_PRS_LU_DSA, - MVPP2_PRS_LU_VLAN, - MVPP2_PRS_LU_VID, - MVPP2_PRS_LU_L2, - MVPP2_PRS_LU_PPPOE, - MVPP2_PRS_LU_IP4, - MVPP2_PRS_LU_IP6, - MVPP2_PRS_LU_FLOWS, - MVPP2_PRS_LU_LAST, -}; - -/* L2 cast enum */ -enum mvpp2_prs_l2_cast { - MVPP2_PRS_L2_UNI_CAST, - MVPP2_PRS_L2_MULTI_CAST, -}; - -/* L3 cast enum */ -enum mvpp2_prs_l3_cast { - MVPP2_PRS_L3_UNI_CAST, - MVPP2_PRS_L3_MULTI_CAST, - MVPP2_PRS_L3_BROAD_CAST -}; - -/* Classifier constants */ -#define MVPP2_CLS_FLOWS_TBL_SIZE 512 -#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 -#define MVPP2_CLS_LKP_TBL_SIZE 64 -#define MVPP2_CLS_RX_QUEUES 256 - -/* RSS constants */ -#define MVPP22_RSS_TABLE_ENTRIES 32 - -/* BM constants */ -#define MVPP2_BM_JUMBO_BUF_NUM 512 -#define MVPP2_BM_LONG_BUF_NUM 1024 -#define MVPP2_BM_SHORT_BUF_NUM 2048 -#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) -#define MVPP2_BM_POOL_PTR_ALIGN 128 - -/* BM cookie (32 bits) definition */ -#define MVPP2_BM_COOKIE_POOL_OFFS 8 -#define MVPP2_BM_COOKIE_CPU_OFFS 24 - -#define MVPP2_BM_SHORT_FRAME_SIZE 512 -#define MVPP2_BM_LONG_FRAME_SIZE 2048 -#define MVPP2_BM_JUMBO_FRAME_SIZE 10240 -/* BM short pool packet size - * These value assure that for SWF the total number - * of bytes allocated for each buffer will be 512 - */ -#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE) -#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE) -#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE) - -#define MVPP21_ADDR_SPACE_SZ 0 -#define MVPP22_ADDR_SPACE_SZ SZ_64K - -#define MVPP2_MAX_THREADS 8 -#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS - -enum mvpp2_bm_pool_log_num { - MVPP2_BM_SHORT, - MVPP2_BM_LONG, - MVPP2_BM_JUMBO, - MVPP2_BM_POOLS_NUM -}; - -static struct { - int pkt_size; - int buf_num; -} mvpp2_pools[MVPP2_BM_POOLS_NUM]; - -/* GMAC MIB Counters register definitions */ -#define MVPP21_MIB_COUNTERS_OFFSET 0x1000 -#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 -#define MVPP22_MIB_COUNTERS_OFFSET 0x0 -#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100 - -#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0 -#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8 -#define MVPP2_MIB_CRC_ERRORS_SENT 0xc -#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10 -#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18 -#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c -#define MVPP2_MIB_FRAMES_64_OCTETS 0x20 -#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24 -#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28 -#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c -#define 
MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30 -#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 -#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38 -#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40 -#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48 -#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c -#define MVPP2_MIB_FC_SENT 0x54 -#define MVPP2_MIB_FC_RCVD 0x58 -#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c -#define MVPP2_MIB_UNDERSIZE_RCVD 0x60 -#define MVPP2_MIB_FRAGMENTS_RCVD 0x64 -#define MVPP2_MIB_OVERSIZE_RCVD 0x68 -#define MVPP2_MIB_JABBER_RCVD 0x6c -#define MVPP2_MIB_MAC_RCV_ERROR 0x70 -#define MVPP2_MIB_BAD_CRC_EVENT 0x74 -#define MVPP2_MIB_COLLISION 0x78 -#define MVPP2_MIB_LATE_COLLISION 0x7c - -#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) - -#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) - -/* Definitions */ - -/* Shared Packet Processor resources */ -struct mvpp2 { - /* Shared registers' base addresses */ - void __iomem *lms_base; - void __iomem *iface_base; - - /* On PPv2.2, each "software thread" can access the base - * register through a separate address space, each 64 KB apart - * from each other. Typically, such address spaces will be - * used per CPU. - */ - void __iomem *swth_base[MVPP2_MAX_THREADS]; - - /* On PPv2.2, some port control registers are located into the system - * controller space. These registers are accessible through a regmap. - */ - struct regmap *sysctrl_base; - - /* Common clocks */ - struct clk *pp_clk; - struct clk *gop_clk; - struct clk *mg_clk; - struct clk *mg_core_clk; - struct clk *axi_clk; - - /* List of pointers to port structures */ - int port_count; - struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; - - /* Aggregated TXQs */ - struct mvpp2_tx_queue *aggr_txqs; - - /* BM pools */ - struct mvpp2_bm_pool *bm_pools; - - /* PRS shadow table */ - struct mvpp2_prs_shadow *prs_shadow; - /* PRS auxiliary table for double vlan entries control */ - bool *prs_double_vlans; - - /* Tclk value */ - u32 tclk; - - /* HW version */ - enum { MVPP21, MVPP22 } hw_version; - - /* Maximum number of RXQs per port */ - unsigned int max_port_rxqs; - - /* Workqueue to gather hardware statistics */ - char queue_name[30]; - struct workqueue_struct *stats_queue; -}; - -struct mvpp2_pcpu_stats { - struct u64_stats_sync syncp; - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; -}; - -/* Per-CPU port control */ -struct mvpp2_port_pcpu { - struct hrtimer tx_done_timer; - bool timer_scheduled; - /* Tasklet for egress finalization */ - struct tasklet_struct tx_done_tasklet; -}; - -struct mvpp2_queue_vector { - int irq; - struct napi_struct napi; - enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type; - int sw_thread_id; - u16 sw_thread_mask; - int first_rxq; - int nrxqs; - u32 pending_cause_rx; - struct mvpp2_port *port; -}; - -struct mvpp2_port { - u8 id; - - /* Index of the port from the "group of ports" complex point - * of view - */ - int gop_id; - - int link_irq; - - struct mvpp2 *priv; - - /* Firmware node associated to the port */ - struct fwnode_handle *fwnode; - - /* Is a PHY always connected to the port */ - bool has_phy; - - /* Per-port registers' base address */ - void __iomem *base; - void __iomem *stats_base; - - struct mvpp2_rx_queue **rxqs; - unsigned int nrxqs; - struct mvpp2_tx_queue **txqs; - unsigned int ntxqs; - struct net_device *dev; - - int pkt_size; - - /* Per-CPU port control */ - struct mvpp2_port_pcpu __percpu *pcpu; - - /* Flags */ - unsigned long flags; - - u16 tx_ring_size; - u16 rx_ring_size; - struct mvpp2_pcpu_stats __percpu *stats; - u64 
*ethtool_stats; - - /* Per-port work and its lock to gather hardware statistics */ - struct mutex gather_stats_lock; - struct delayed_work stats_work; - - struct device_node *of_node; - - phy_interface_t phy_interface; - struct phylink *phylink; - struct phy *comphy; - - struct mvpp2_bm_pool *pool_long; - struct mvpp2_bm_pool *pool_short; - - /* Index of first port's physical RXQ */ - u8 first_rxq; - - struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; - unsigned int nqvecs; - bool has_tx_irqs; - - u32 tx_time_coal; -}; - -/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the - * layout of the transmit and reception DMA descriptors, and their - * layout is therefore defined by the hardware design - */ - -#define MVPP2_TXD_L3_OFF_SHIFT 0 -#define MVPP2_TXD_IP_HLEN_SHIFT 8 -#define MVPP2_TXD_L4_CSUM_FRAG BIT(13) -#define MVPP2_TXD_L4_CSUM_NOT BIT(14) -#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) -#define MVPP2_TXD_PADDING_DISABLE BIT(23) -#define MVPP2_TXD_L4_UDP BIT(24) -#define MVPP2_TXD_L3_IP6 BIT(26) -#define MVPP2_TXD_L_DESC BIT(28) -#define MVPP2_TXD_F_DESC BIT(29) - -#define MVPP2_RXD_ERR_SUMMARY BIT(15) -#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) -#define MVPP2_RXD_ERR_CRC 0x0 -#define MVPP2_RXD_ERR_OVERRUN BIT(13) -#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) -#define MVPP2_RXD_BM_POOL_ID_OFFS 16 -#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) -#define MVPP2_RXD_HWF_SYNC BIT(21) -#define MVPP2_RXD_L4_CSUM_OK BIT(22) -#define MVPP2_RXD_IP4_HEADER_ERR BIT(24) -#define MVPP2_RXD_L4_TCP BIT(25) -#define MVPP2_RXD_L4_UDP BIT(26) -#define MVPP2_RXD_L3_IP4 BIT(28) -#define MVPP2_RXD_L3_IP6 BIT(30) -#define MVPP2_RXD_BUF_HDR BIT(31) - -/* HW TX descriptor for PPv2.1 */ -struct mvpp21_tx_desc { - u32 command; /* Options used by HW for packet transmitting.*/ - u8 packet_offset; /* the offset from the buffer beginning */ - u8 phys_txq; /* destination queue ID */ - u16 data_size; /* data size of transmitted packet in bytes */ - u32 buf_dma_addr; /* physical addr of transmitted buffer */ - u32 buf_cookie; /* cookie for access to TX buffer in tx path */ - u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ - u32 reserved2; /* reserved (for future use) */ -}; - -/* HW RX descriptor for PPv2.1 */ -struct mvpp21_rx_desc { - u32 status; /* info about received packet */ - u16 reserved1; /* parser_info (for future use, PnC) */ - u16 data_size; /* size of received packet in bytes */ - u32 buf_dma_addr; /* physical address of the buffer */ - u32 buf_cookie; /* cookie for access to RX buffer in rx path */ - u16 reserved2; /* gem_port_id (for future use, PON) */ - u16 reserved3; /* csum_l4 (for future use, PnC) */ - u8 reserved4; /* bm_qset (for future use, BM) */ - u8 reserved5; - u16 reserved6; /* classify_info (for future use, PnC) */ - u32 reserved7; /* flow_id (for future use, PnC) */ - u32 reserved8; -}; - -/* HW TX descriptor for PPv2.2 */ -struct mvpp22_tx_desc { - u32 command; - u8 packet_offset; - u8 phys_txq; - u16 data_size; - u64 reserved1; - u64 buf_dma_addr_ptp; - u64 buf_cookie_misc; -}; - -/* HW RX descriptor for PPv2.2 */ -struct mvpp22_rx_desc { - u32 status; - u16 reserved1; - u16 data_size; - u32 reserved2; - u32 reserved3; - u64 buf_dma_addr_key_hash; - u64 buf_cookie_misc; -}; - -/* Opaque type used by the driver to manipulate the HW TX and RX - * descriptors - */ -struct mvpp2_tx_desc { - union { - struct mvpp21_tx_desc pp21; - struct mvpp22_tx_desc pp22; - }; -}; - -struct mvpp2_rx_desc { - union { - struct mvpp21_rx_desc pp21; - 
struct mvpp22_rx_desc pp22; - }; -}; - -struct mvpp2_txq_pcpu_buf { - /* Transmitted SKB */ - struct sk_buff *skb; - - /* Physical address of transmitted buffer */ - dma_addr_t dma; - - /* Size transmitted */ - size_t size; -}; - -/* Per-CPU Tx queue control */ -struct mvpp2_txq_pcpu { - int cpu; - - /* Number of Tx DMA descriptors in the descriptor ring */ - int size; - - /* Number of currently used Tx DMA descriptor in the - * descriptor ring - */ - int count; - - int wake_threshold; - int stop_threshold; - - /* Number of Tx DMA descriptors reserved for each CPU */ - int reserved_num; - - /* Infos about transmitted buffers */ - struct mvpp2_txq_pcpu_buf *buffs; - - /* Index of last TX DMA descriptor that was inserted */ - int txq_put_index; - - /* Index of the TX DMA descriptor to be cleaned up */ - int txq_get_index; - - /* DMA buffer for TSO headers */ - char *tso_headers; - dma_addr_t tso_headers_dma; -}; - -struct mvpp2_tx_queue { - /* Physical number of this Tx queue */ - u8 id; - - /* Logical number of this Tx queue */ - u8 log_id; - - /* Number of Tx DMA descriptors in the descriptor ring */ - int size; - - /* Number of currently used Tx DMA descriptor in the descriptor ring */ - int count; - - /* Per-CPU control of physical Tx queues */ - struct mvpp2_txq_pcpu __percpu *pcpu; - - u32 done_pkts_coal; - - /* Virtual address of thex Tx DMA descriptors array */ - struct mvpp2_tx_desc *descs; - - /* DMA address of the Tx DMA descriptors array */ - dma_addr_t descs_dma; - - /* Index of the last Tx DMA descriptor */ - int last_desc; - - /* Index of the next Tx DMA descriptor to process */ - int next_desc_to_proc; -}; - -struct mvpp2_rx_queue { - /* RX queue number, in the range 0-31 for physical RXQs */ - u8 id; - - /* Num of rx descriptors in the rx descriptor ring */ - int size; - - u32 pkts_coal; - u32 time_coal; - - /* Virtual address of the RX DMA descriptors array */ - struct mvpp2_rx_desc *descs; - - /* DMA address of the RX DMA descriptors array */ - dma_addr_t descs_dma; - - /* Index of the last RX DMA descriptor */ - int last_desc; - - /* Index of the next RX DMA descriptor to process */ - int next_desc_to_proc; - - /* ID of port to which physical RXQ is mapped */ - int port; - - /* Port's logic RXQ number to which physical RXQ is mapped */ - int logic_rxq; -}; - -union mvpp2_prs_tcam_entry { - u32 word[MVPP2_PRS_TCAM_WORDS]; - u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; -}; - -union mvpp2_prs_sram_entry { - u32 word[MVPP2_PRS_SRAM_WORDS]; - u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; -}; - -struct mvpp2_prs_entry { - u32 index; - union mvpp2_prs_tcam_entry tcam; - union mvpp2_prs_sram_entry sram; -}; - -struct mvpp2_prs_shadow { - bool valid; - bool finish; - - /* Lookup ID */ - int lu; - - /* User defined offset */ - int udf; - - /* Result info */ - u32 ri; - u32 ri_mask; -}; - -struct mvpp2_cls_flow_entry { - u32 index; - u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; -}; - -struct mvpp2_cls_lookup_entry { - u32 lkpid; - u32 way; - u32 data; -}; - -struct mvpp2_bm_pool { - /* Pool number in the range 0-7 */ - int id; - - /* Buffer Pointers Pool External (BPPE) size */ - int size; - /* BPPE size in bytes */ - int size_bytes; - /* Number of buffers for this pool */ - int buf_num; - /* Pool buffer size */ - int buf_size; - /* Packet size */ - int pkt_size; - int frag_size; - - /* BPPE virtual base address */ - u32 *virt_addr; - /* BPPE DMA base address */ - dma_addr_t dma_addr; - - /* Ports using BM pool */ - u32 port_map; -}; - -#define IS_TSO_HEADER(txq_pcpu, addr) \ - ((addr) >= 
(txq_pcpu)->tso_headers_dma && \ - (addr) < (txq_pcpu)->tso_headers_dma + \ - (txq_pcpu)->size * TSO_HEADER_SIZE) - -/* The prototype is added here to be used in start_dev when using ACPI. This - * will be removed once phylink is used for all modes (dt+ACPI). - */ -static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, - const struct phylink_link_state *state); - -/* Queue modes */ -#define MVPP2_QDIST_SINGLE_MODE 0 -#define MVPP2_QDIST_MULTI_MODE 1 - -static int queue_mode = MVPP2_QDIST_SINGLE_MODE; - -module_param(queue_mode, int, 0444); -MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); - -#define MVPP2_DRIVER_NAME "mvpp2" -#define MVPP2_DRIVER_VERSION "1.0" - -/* Utility/helper methods */ - -static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) -{ - writel(data, priv->swth_base[0] + offset); -} - -static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) -{ - return readl(priv->swth_base[0] + offset); -} - -static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) -{ - return readl_relaxed(priv->swth_base[0] + offset); -} -/* These accessors should be used to access: - * - * - per-CPU registers, where each CPU has its own copy of the - * register. - * - * MVPP2_BM_VIRT_ALLOC_REG - * MVPP2_BM_ADDR_HIGH_ALLOC - * MVPP22_BM_ADDR_HIGH_RLS_REG - * MVPP2_BM_VIRT_RLS_REG - * MVPP2_ISR_RX_TX_CAUSE_REG - * MVPP2_ISR_RX_TX_MASK_REG - * MVPP2_TXQ_NUM_REG - * MVPP2_AGGR_TXQ_UPDATE_REG - * MVPP2_TXQ_RSVD_REQ_REG - * MVPP2_TXQ_RSVD_RSLT_REG - * MVPP2_TXQ_SENT_REG - * MVPP2_RXQ_NUM_REG - * - * - global registers that must be accessed through a specific CPU - * window, because they are related to an access to a per-CPU - * register - * - * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) - * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG) - * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG) - * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG) - * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG) - * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG) - * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) - */ -static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, - u32 offset, u32 data) -{ - writel(data, priv->swth_base[cpu] + offset); -} - -static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, - u32 offset) -{ - return readl(priv->swth_base[cpu] + offset); -} - -static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, - u32 offset, u32 data) -{ - writel_relaxed(data, priv->swth_base[cpu] + offset); -} - -static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, - u32 offset) -{ - return readl_relaxed(priv->swth_base[cpu] + offset); -} - -static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc) -{ - if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.buf_dma_addr; - else - return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; -} - -static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc, - dma_addr_t dma_addr) -{ - dma_addr_t addr, offset; - - addr = dma_addr & ~MVPP2_TX_DESC_ALIGN; - offset = dma_addr & MVPP2_TX_DESC_ALIGN; - - if (port->priv->hw_version 
== MVPP21) { - tx_desc->pp21.buf_dma_addr = addr; - tx_desc->pp21.packet_offset = offset; - } else { - u64 val = (u64)addr; - - tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; - tx_desc->pp22.buf_dma_addr_ptp |= val; - tx_desc->pp22.packet_offset = offset; - } -} - -static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc) -{ - if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.data_size; - else - return tx_desc->pp22.data_size; -} - -static void mvpp2_txdesc_size_set(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc, - size_t size) -{ - if (port->priv->hw_version == MVPP21) - tx_desc->pp21.data_size = size; - else - tx_desc->pp22.data_size = size; -} - -static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc, - unsigned int txq) -{ - if (port->priv->hw_version == MVPP21) - tx_desc->pp21.phys_txq = txq; - else - tx_desc->pp22.phys_txq = txq; -} - -static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc, - unsigned int command) -{ - if (port->priv->hw_version == MVPP21) - tx_desc->pp21.command = command; - else - tx_desc->pp22.command = command; -} - -static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc) -{ - if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.packet_offset; - else - return tx_desc->pp22.packet_offset; -} - -static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_dma_addr; - else - return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; -} - -static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_cookie; - else - return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; -} - -static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.data_size; - else - return rx_desc->pp22.data_size; -} - -static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.status; - else - return rx_desc->pp22.status; -} - -static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) -{ - txq_pcpu->txq_get_index++; - if (txq_pcpu->txq_get_index == txq_pcpu->size) - txq_pcpu->txq_get_index = 0; -} - -static void mvpp2_txq_inc_put(struct mvpp2_port *port, - struct mvpp2_txq_pcpu *txq_pcpu, - struct sk_buff *skb, - struct mvpp2_tx_desc *tx_desc) -{ - struct mvpp2_txq_pcpu_buf *tx_buf = - txq_pcpu->buffs + txq_pcpu->txq_put_index; - tx_buf->skb = skb; - tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); - tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + - mvpp2_txdesc_offset_get(port, tx_desc); - txq_pcpu->txq_put_index++; - if (txq_pcpu->txq_put_index == txq_pcpu->size) - txq_pcpu->txq_put_index = 0; -} - -/* Get number of physical egress port */ -static inline int mvpp2_egress_port(struct mvpp2_port *port) -{ - return MVPP2_MAX_TCONT + port->id; -} - -/* Get number of physical TXQ */ -static inline int mvpp2_txq_phys(int port, int txq) -{ - return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; -} - -/* Parser configuration routines */ - -/* Update parser tcam and sram hw entries */ -static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) -{ - 
int i; - - if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) - return -EINVAL; - - /* Clear entry invalidation bit */ - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; - - /* Write tcam index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); - - /* Write sram index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); - - return 0; -} - -/* Initialize tcam entry from hw */ -static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, - struct mvpp2_prs_entry *pe, int tid) -{ - int i; - - if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) - return -EINVAL; - - memset(pe, 0, sizeof(*pe)); - pe->index = tid; - - /* Write tcam index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, - MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); - if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) - return MVPP2_PRS_TCAM_ENTRY_INVALID; - - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); - - /* Write sram index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); - - return 0; -} - -/* Invalidate tcam hw entry */ -static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) -{ - /* Write index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), - MVPP2_PRS_TCAM_INV_MASK); -} - -/* Enable shadow table entry and set its lookup ID */ -static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) -{ - priv->prs_shadow[index].valid = true; - priv->prs_shadow[index].lu = lu; -} - -/* Update ri fields in shadow table entry */ -static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, - unsigned int ri, unsigned int ri_mask) -{ - priv->prs_shadow[index].ri_mask = ri_mask; - priv->prs_shadow[index].ri = ri; -} - -/* Update lookup field in tcam sw entry */ -static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) -{ - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; - pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; -} - -/* Update mask for single port in tcam sw entry */ -static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, - unsigned int port, bool add) -{ - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - if (add) - pe->tcam.byte[enable_off] &= ~(1 << port); - else - pe->tcam.byte[enable_off] |= 1 << port; -} - -/* Update port map in tcam sw entry */ -static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, - unsigned int ports) -{ - unsigned char port_mask = MVPP2_PRS_PORT_MASK; - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; - pe->tcam.byte[enable_off] &= ~port_mask; - pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; -} - -/* Obtain port map from tcam sw entry */ -static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) -{ - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - return 
~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; -} - -/* Set byte of data and its enable bits in tcam sw entry */ -static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, - unsigned int offs, unsigned char byte, - unsigned char enable) -{ - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; -} - -/* Get byte of data and its enable bits from tcam sw entry */ -static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, - unsigned int offs, unsigned char *byte, - unsigned char *enable) -{ - *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; - *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; -} - -/* Compare tcam data bytes with a pattern */ -static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, - u16 data) -{ - int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); - u16 tcam_data; - - tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; - if (tcam_data != data) - return false; - return true; -} - -/* Update ai bits in tcam sw entry */ -static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, - unsigned int bits, unsigned int enable) -{ - int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; - - for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { - if (!(enable & BIT(i))) - continue; - - if (bits & BIT(i)) - pe->tcam.byte[ai_idx] |= 1 << i; - else - pe->tcam.byte[ai_idx] &= ~(1 << i); - } - - pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; -} - -/* Get ai bits from tcam sw entry */ -static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) -{ - return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; -} - -/* Set ethertype in tcam sw entry */ -static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, - unsigned short ethertype) -{ - mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); - mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); -} - -/* Set vid in tcam sw entry */ -static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, - unsigned short vid) -{ - mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); - mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); -} - -/* Set bits in sram sw entry */ -static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, - int val) -{ - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); -} - -/* Clear bits in sram sw entry */ -static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, - int val) -{ - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); -} - -/* Update ri bits in sram sw entry */ -static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, - unsigned int bits, unsigned int mask) -{ - unsigned int i; - - for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { - int ri_off = MVPP2_PRS_SRAM_RI_OFFS; - - if (!(mask & BIT(i))) - continue; - - if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); - else - mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); - - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); - } -} - -/* Obtain ri bits from sram sw entry */ -static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) -{ - return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; -} - -/* Update ai bits in sram sw entry */ -static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, - unsigned int bits, unsigned int mask) -{ - unsigned int i; - int ai_off = MVPP2_PRS_SRAM_AI_OFFS; - - for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { - if (!(mask & BIT(i))) - 
continue; - - if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); - else - mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); - - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); - } -} - -/* Read ai bits from sram sw entry */ -static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) -{ - u8 bits; - int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); - int ai_en_off = ai_off + 1; - int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; - - bits = (pe->sram.byte[ai_off] >> ai_shift) | - (pe->sram.byte[ai_en_off] << (8 - ai_shift)); - - return bits; -} - -/* In sram sw entry set lookup ID field of the tcam key to be used in the next - * lookup interation - */ -static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, - unsigned int lu) -{ - int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; - - mvpp2_prs_sram_bits_clear(pe, sram_next_off, - MVPP2_PRS_SRAM_NEXT_LU_MASK); - mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); -} - -/* In the sram sw entry set sign and value of the next lookup offset - * and the offset value generated to the classifier - */ -static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, - unsigned int op) -{ - /* Set sign */ - if (shift < 0) { - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); - shift = 0 - shift; - } else { - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); - } - - /* Set value */ - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = - (unsigned char)shift; - - /* Reset and set operation */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); - - /* Set base offset as current */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); -} - -/* In the sram sw entry set sign and value of the user defined offset - * generated to the classifier - */ -static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, - unsigned int type, int offset, - unsigned int op) -{ - /* Set sign */ - if (offset < 0) { - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); - offset = 0 - offset; - } else { - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); - } - - /* Set value */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, - MVPP2_PRS_SRAM_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] |= - (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - - /* Set offset type */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, - MVPP2_PRS_SRAM_UDF_TYPE_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); - - /* Set offset operation */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, - MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> - (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= - (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - /* Set base offset as current */ - mvpp2_prs_sram_bits_clear(pe, 
MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); -} - -/* Find parser flow entry */ -static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ - for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { - u8 bits; - - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - bits = mvpp2_prs_sram_ai_get(&pe); - - /* Sram store classification lookup ID in AI bits [5:0] */ - if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) - return tid; - } - - return -ENOENT; -} - -/* Return first free tcam index, seeking from start to end */ -static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, - unsigned char end) -{ - int tid; - - if (start > end) - swap(start, end); - - if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) - end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; - - for (tid = start; tid <= end; tid++) { - if (!priv->prs_shadow[tid].valid) - return tid; - } - - return -EINVAL; -} - -/* Enable/disable dropping all mac da's */ -static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) -{ - struct mvpp2_prs_entry pe; - - if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { - /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); - } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = MVPP2_PE_DROP_ALL; - - /* Non-promiscuous mode for all ports - DROP unknown packets */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_DROP_MASK); - - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set port to unicast or multicast promiscuous mode */ -static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) -{ - struct mvpp2_prs_entry pe; - unsigned char cast_match; - unsigned int ri; - int tid; - - if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { - cast_match = MVPP2_PRS_UCAST_VAL; - tid = MVPP2_PE_MAC_UC_PROMISCUOUS; - ri = MVPP2_PRS_RI_L2_UCAST; - } else { - cast_match = MVPP2_PRS_MCAST_VAL; - tid = MVPP2_PE_MAC_MC_PROMISCUOUS; - ri = MVPP2_PRS_RI_L2_MCAST; - } - - /* promiscuous mode - Accept unknown unicast or multicast packets */ - if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } else { - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = tid; - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); - - /* Set result info bits */ - mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK); - - /* Match UC or MC addresses */ - mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match, - MVPP2_PRS_CAST_MASK); - - /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set entry for dsa packets 
*/ -static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, - bool tagged, bool extend) -{ - struct mvpp2_prs_entry pe; - int tid, shift; - - if (extend) { - tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; - shift = 8; - } else { - tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; - shift = 4; - } - - if (priv->prs_shadow[tid].valid) { - /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); - } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); - pe.index = tid; - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); - - if (tagged) { - /* Set tagged bit in DSA tag */ - mvpp2_prs_tcam_data_byte_set(&pe, 0, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT); - - /* Set ai bits for next iteration */ - if (extend) - mvpp2_prs_sram_ai_update(&pe, 1, - MVPP2_PRS_SRAM_AI_MASK); - else - mvpp2_prs_sram_ai_update(&pe, 0, - MVPP2_PRS_SRAM_AI_MASK); - - /* Set result info bits to 'single vlan' */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, - MVPP2_PRS_RI_VLAN_MASK); - /* If packet is tagged continue check vid filtering */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - } else { - /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/ - mvpp2_prs_sram_shift_set(&pe, shift, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Set result info bits to 'no vlans' */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, - MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - } - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set entry for dsa ethertype */ -static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, - bool add, bool tagged, bool extend) -{ - struct mvpp2_prs_entry pe; - int tid, shift, port_mask; - - if (extend) { - tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : - MVPP2_PE_ETYPE_EDSA_UNTAGGED; - port_mask = 0; - shift = 8; - } else { - tid = tagged ? 
MVPP2_PE_ETYPE_DSA_TAGGED : - MVPP2_PE_ETYPE_DSA_UNTAGGED; - port_mask = MVPP2_PRS_PORT_MASK; - shift = 4; - } - - if (priv->prs_shadow[tid].valid) { - /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); - } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); - pe.index = tid; - - /* Set ethertype */ - mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); - mvpp2_prs_match_etype(&pe, 2, 0); - - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, - MVPP2_PRS_RI_DSA_MASK); - /* Shift ethertype + 2 byte reserved + tag*/ - mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); - - if (tagged) { - /* Set tagged bit in DSA tag */ - mvpp2_prs_tcam_data_byte_set(&pe, - MVPP2_ETH_TYPE_LEN + 2 + 3, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT); - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, - MVPP2_PRS_SRAM_AI_MASK); - /* If packet is tagged continue check vlans */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); - } else { - /* Set result info bits to 'no vlans' */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, - MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - } - /* Mask/unmask all ports, depending on dsa type */ - mvpp2_prs_tcam_port_map_set(&pe, port_mask); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Search for existing single/triple vlan entry */ -static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through the all entries with MVPP2_PRS_LU_VLAN */ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { - unsigned int ri_bits, ai_bits; - bool match; - - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); - if (!match) - continue; - - /* Get vlan type */ - ri_bits = mvpp2_prs_sram_ri_get(&pe); - ri_bits &= MVPP2_PRS_RI_VLAN_MASK; - - /* Get current ai value from tcam */ - ai_bits = mvpp2_prs_tcam_ai_get(&pe); - /* Clear double vlan bit */ - ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; - - if (ai != ai_bits) - continue; - - if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || - ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) - return tid; - } - - return -ENOENT; -} - -/* Add/update single/triple vlan entry */ -static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, - unsigned int port_map) -{ - struct mvpp2_prs_entry pe; - int tid_aux, tid; - int ret = 0; - - memset(&pe, 0, sizeof(pe)); - - tid = mvpp2_prs_vlan_find(priv, tpid, ai); - - if (tid < 0) { - /* Create new tcam entry */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, - MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) - return tid; - - /* Get last double vlan tid */ - for (tid_aux = MVPP2_PE_LAST_FREE_TID; - tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { - unsigned int ri_bits; - - if (!priv->prs_shadow[tid_aux].valid || - priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); - ri_bits = mvpp2_prs_sram_ri_get(&pe); - if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == - MVPP2_PRS_RI_VLAN_DOUBLE) - break; - } - - if (tid <= tid_aux) - return -EINVAL; - - 
memset(&pe, 0, sizeof(pe)); - pe.index = tid; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - - mvpp2_prs_match_etype(&pe, 0, tpid); - - /* VLAN tag detected, proceed with VID filtering */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, - MVPP2_PRS_RI_VLAN_MASK); - } else { - ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE, - MVPP2_PRS_RI_VLAN_MASK); - } - mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(&pe, port_map); - - mvpp2_prs_hw_write(priv, &pe); - - return ret; -} - -/* Get first free double vlan ai number */ -static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) -{ - int i; - - for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { - if (!priv->prs_double_vlans[i]) - return i; - } - - return -EINVAL; -} - -/* Search for existing double vlan entry */ -static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, - unsigned short tpid2) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through the all entries with MVPP2_PRS_LU_VLAN */ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { - unsigned int ri_mask; - bool match; - - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && - mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); - - if (!match) - continue; - - ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK; - if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) - return tid; - } - - return -ENOENT; -} - -/* Add or update double vlan entry */ -static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, - unsigned short tpid2, - unsigned int port_map) -{ - int tid_aux, tid, ai, ret = 0; - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); - - if (tid < 0) { - /* Create new tcam entry */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - /* Set ai value for new double vlan entry */ - ai = mvpp2_prs_double_vlan_ai_free_get(priv); - if (ai < 0) - return ai; - - /* Get first single/triple vlan tid */ - for (tid_aux = MVPP2_PE_FIRST_FREE_TID; - tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { - unsigned int ri_bits; - - if (!priv->prs_shadow[tid_aux].valid || - priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); - ri_bits = mvpp2_prs_sram_ri_get(&pe); - ri_bits &= MVPP2_PRS_RI_VLAN_MASK; - if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || - ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) - break; - } - - if (tid >= tid_aux) - return -ERANGE; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - pe.index = tid; - - priv->prs_double_vlans[ai] = true; - - mvpp2_prs_match_etype(&pe, 0, tpid1); - mvpp2_prs_match_etype(&pe, 4, tpid2); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); - /* Shift 4 bytes - skip outer vlan tag */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - 
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, - MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, - MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - - /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(&pe, port_map); - mvpp2_prs_hw_write(priv, &pe); - - return ret; -} - -/* IPv4 header parsing for fragmentation and L4 offset */ -static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, - unsigned int ri, unsigned int ri_mask) -{ - struct mvpp2_prs_entry pe; - int tid; - - if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && - (proto != IPPROTO_IGMP)) - return -EINVAL; - - /* Not fragmented packet */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = tid; - - /* Set next lu to IPv4 */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, - MVPP2_PRS_TCAM_PROTO_MASK_L); - mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, - MVPP2_PRS_TCAM_PROTO_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Fragmented packet */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); - - mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, - ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); - mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* IPv4 L3 multicast or broadcast */ -static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) -{ - struct mvpp2_prs_entry pe; - int mask, tid; - - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = tid; - - switch (l3_cast) { - case MVPP2_PRS_L3_MULTI_CAST: - mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, - MVPP2_PRS_IPV4_MC_MASK); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - break; - case MVPP2_PRS_L3_BROAD_CAST: - mask = MVPP2_PRS_IPV4_BC_MASK; - mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); - mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); - mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); - mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, 
mask); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - break; - default: - return -EINVAL; - } - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Set entries for protocols over IPv6 */ -static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, - unsigned int ri, unsigned int ri_mask) -{ - struct mvpp2_prs_entry pe; - int tid; - - if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && - (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) - return -EINVAL; - - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = tid; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct ipv6hdr) - 6, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Write HW */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* IPv6 L3 multicast entry */ -static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) -{ - struct mvpp2_prs_entry pe; - int tid; - - if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) - return -EINVAL; - - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = tid; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Shift back to IPv6 NH */ - mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, - MVPP2_PRS_IPV6_MC_MASK); - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Parser per-port initialization */ -static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, - int lu_max, int offset) -{ - u32 val; - - /* Set lookup ID */ - val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); - val &= ~MVPP2_PRS_PORT_LU_MASK(port); - val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); - mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); - - /* Set maximum number of loops for packet received from port */ - val = 
mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); - val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); - val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); - mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); - - /* Set initial offset for packet header extraction for the first - * searching loop - */ - val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); - val &= ~MVPP2_PRS_INIT_OFF_MASK(port); - val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); - mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); -} - -/* Default flow entries initialization for all ports */ -static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int port; - - for (port = 0; port < MVPP2_MAX_PORTS; port++) { - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Set flow ID*/ - mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_hw_write(priv, &pe); - } -} - -/* Set default entry for Marvell Header field */ -static void mvpp2_prs_mh_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - pe.index = MVPP2_PE_MH_DEFAULT; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); - mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set default entires (place holder) for promiscuous, non-promiscuous and - * multicast MAC addresses - */ -static void mvpp2_prs_mac_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - /* Non-promiscuous mode for all ports - DROP unknown packets */ - pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_DROP_MASK); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - mvpp2_prs_hw_write(priv, &pe); - - /* Create dummy entries for drop all and promiscuous modes */ - mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); -} - -/* Set default entries for various types of dsa packets */ -static void mvpp2_prs_dsa_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - /* None tagged EDSA entry - place holder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, - MVPP2_PRS_EDSA); - - /* Tagged EDSA entry - place holder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - - /* None tagged DSA entry - place holder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, - MVPP2_PRS_DSA); - - /* Tagged DSA entry - place holder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - - /* None tagged EDSA ethertype entry - place holder*/ - 
mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - - /* Tagged EDSA ethertype entry - place holder*/ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - - /* None tagged DSA ethertype entry */ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - - /* Tagged DSA ethertype entry */ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - - /* Set default entry, in case DSA or EDSA tag not found */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); - pe.index = MVPP2_PE_DSA_DEFAULT; - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); - - /* Shift 0 bytes */ - mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - - /* Clear all sram ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Initialize parser entries for VID filtering */ -static void mvpp2_prs_vid_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - /* Set default vid entry */ - pe.index = MVPP2_PE_VID_FLTR_DEFAULT; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); - - /* Skip VLAN header - Set offset to 4 bytes */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); - - /* Set default vid entry for extended DSA*/ - memset(&pe, 0, sizeof(pe)); - - /* Set default vid entry */ - pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, - MVPP2_PRS_EDSA_VID_AI_BIT); - - /* Skip VLAN header - Set offset to 8 bytes */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); -} - -/* Match basic ethertypes */ -static int mvpp2_prs_etype_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Ethertype: PPPoE */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); - - mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, - MVPP2_PRS_RI_PPPOE_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - 
priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, - MVPP2_PRS_RI_PPPOE_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: ARP */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); - - /* Generate flow in the next iteration*/ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = true; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: LBTD */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); - - /* Generate flow in the next iteration*/ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = true; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv4 without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, - MVPP2_PRS_IPV4_HEAD_MASK | - MVPP2_PRS_IPV4_IHL_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes of IP header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); 
- mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv4 with options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - - /* Clear tcam data before updating */ - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; - - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD, - MVPP2_PRS_IPV4_HEAD_MASK); - - /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv6 without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); - - /* Skip DIP of IPV6 header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + - MVPP2_MAX_L3_ADDR_SIZE, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = MVPP2_PE_ETH_TYPE_UN; - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Generate flow in the next iteration*/ - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Set L3 offset even it's unknown L3 */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = true; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Configure vlan entries and detect up to 2 successive VLAN tags. 
- * Possible options: - * 0x8100, 0x88A8 - * 0x8100, 0x8100 - * 0x8100 - * 0x88A8 - */ -static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int err; - - priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - return -ENOMEM; - - /* Double VLAN: 0x8100, 0x88A8 */ - err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Double VLAN: 0x8100, 0x8100 */ - err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Single VLAN: 0x88a8 */ - err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Single VLAN: 0x8100 */ - err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Set default double vlan entry */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - pe.index = MVPP2_PE_VLAN_DBL; - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - - /* Clear ai for next iterations */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, - MVPP2_PRS_RI_VLAN_MASK); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, - MVPP2_PRS_DBL_VLAN_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - mvpp2_prs_hw_write(priv, &pe); - - /* Set default vlan none entry */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - pe.index = MVPP2_PE_VLAN_NONE; - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, - MVPP2_PRS_RI_VLAN_MASK); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Set entries for PPPoE ethertype */ -static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* IPv4 over PPPoE with options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, PPP_IP); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes of IP header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - /* IPv4 over PPPoE without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, - MVPP2_PRS_IPV4_HEAD_MASK | - MVPP2_PRS_IPV4_IHL_MASK); - - /* Clear ri 
before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - /* IPv6 over PPPoE */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes of IPv6 header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - /* Non-IP over PPPoE */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - pe.index = tid; - - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, - MVPP2_PRS_RI_L3_PROTO_MASK); - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - /* Set L3 offset even if it's unknown L3 */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Initialize entries for IPv4 */ -static int mvpp2_prs_ip4_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int err; - - /* Set entries for TCP, UDP and IGMP over IPv4 */ - err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, - MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - if (err) - return err; - - /* IPv4 Broadcast */ - err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); - if (err) - return err; - - /* IPv4 Multicast */ - err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); - if (err) - return err; - - /* Default IPv4 entry for unknown protocols */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = MVPP2_PE_IP4_PROTO_UN; - - /* Set next lu to IPv4 */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, - MVPP2_PRS_RI_L4_PROTO_MASK); - - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); - /* 
Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv4 entry for unicast address */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = MVPP2_PE_IP4_ADDR_UN; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Initialize entries for IPv6 */ -static int mvpp2_prs_ip6_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int tid, err; - - /* Set entries for TCP, UDP and ICMP over IPv6 */ - err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, - MVPP2_PRS_RI_L4_TCP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, - MVPP2_PRS_RI_L4_UDP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, - MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - if (err) - return err; - - /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */ - /* Result Info: UDF7=1, DS lite */ - err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, - MVPP2_PRS_RI_UDF7_IP6_LITE, - MVPP2_PRS_RI_UDF7_MASK); - if (err) - return err; - - /* IPv6 multicast */ - err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); - if (err) - return err; - - /* Entry for checking hop limit */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = tid; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | - MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_L3_PROTO_MASK | - MVPP2_PRS_RI_DROP_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv6 entry for unknown protocols */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = MVPP2_PE_IP6_PROTO_UN; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, - MVPP2_PRS_RI_L4_PROTO_MASK); - /* Set L4 offset relatively to our current place */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct ipv6hdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 
MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv6 entry for unknown ext protocols */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, - MVPP2_PRS_RI_L4_PROTO_MASK); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, - MVPP2_PRS_IPV6_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv6 entry for unicast address */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = MVPP2_PE_IP6_ADDR_UN; - - /* Finished: go to IPv6 again */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Shift back to IPV6 NH */ - mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Find tcam entry with matched pair */ -static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, - u16 mask) -{ - unsigned char byte[2], enable[2]; - struct mvpp2_prs_entry pe; - u16 rvid, rmask; - int tid; - - /* Go through the all entries with MVPP2_PRS_LU_VID */ - for (tid = MVPP2_PE_VID_FILT_RANGE_START; - tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - - mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); - mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); - - rvid = ((byte[0] & 0xf) << 8) + byte[1]; - rmask = ((enable[0] & 0xf) << 8) + enable[1]; - - if (rvid != vid || rmask != mask) - continue; - - return tid; - } - - return -ENOENT; -} - -/* Write parser entry for VID filtering */ -static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) -{ - unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + - port->id * MVPP2_PRS_VLAN_FILT_MAX; - unsigned int mask = 0xfff, reg_val, shift; - struct mvpp2 *priv = port->priv; - struct mvpp2_prs_entry pe; - int tid; - - memset(&pe, 0, sizeof(pe)); - - /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); - - reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); - if (reg_val & MVPP2_DSA_EXTENDED) - shift = MVPP2_VLAN_TAG_EDSA_LEN; - else - shift = MVPP2_VLAN_TAG_LEN; - - /* No such entry */ - if (tid < 0) { - - /* Go through all entries from first to last in vlan range */ - tid = mvpp2_prs_tcam_first_free(priv, vid_start, - vid_start + - MVPP2_PRS_VLAN_FILT_MAX_ENTRY); - - /* There isn't room for a new VID filter */ - if (tid < 0) - return tid; - - 
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - pe.index = tid; - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - - /* Enable the current port */ - mvpp2_prs_tcam_port_set(&pe, port->id, true); - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Skip VLAN header - Set offset to 4 or 8 bytes */ - mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Set match on VID */ - mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Write parser entry for VID filtering */ -static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) -{ - struct mvpp2 *priv = port->priv; - int tid; - - /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); - - /* No such entry */ - if (tid < 0) - return; - - mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; -} - -/* Remove all existing VID filters on this port */ -static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - int tid; - - for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); - tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { - if (priv->prs_shadow[tid].valid) - mvpp2_prs_vid_entry_remove(port, tid); - } -} - -/* Remove VID filering entry for this port */ -static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) -{ - unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); - struct mvpp2 *priv = port->priv; - - /* Invalidate the guard entry */ - mvpp2_prs_hw_inv(priv, tid); - - priv->prs_shadow[tid].valid = false; -} - -/* Add guard entry that drops packets when no VID is matched on this port */ -static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) -{ - unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); - struct mvpp2 *priv = port->priv; - unsigned int reg_val, shift; - struct mvpp2_prs_entry pe; - - if (priv->prs_shadow[tid].valid) - return; - - memset(&pe, 0, sizeof(pe)); - - pe.index = tid; - - reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); - if (reg_val & MVPP2_DSA_EXTENDED) - shift = MVPP2_VLAN_TAG_EDSA_LEN; - else - shift = MVPP2_VLAN_TAG_LEN; - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port->id, true); - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Skip VLAN header - Set offset to 4 or 8 bytes */ - mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Drop VLAN packets that don't belong to any VIDs on this port */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_DROP_MASK); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); -} - -/* Parser default initialization */ -static int mvpp2_prs_default_init(struct platform_device *pdev, - struct mvpp2 *priv) -{ - int err, index, i; - - /* Enable tcam table */ - mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); - - /* Clear all tcam and sram 
entries */ - for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); - - mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); - for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); - } - - /* Invalidate all tcam entries */ - for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) - mvpp2_prs_hw_inv(priv, index); - - priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - - /* Always start from lookup = 0 */ - for (index = 0; index < MVPP2_MAX_PORTS; index++) - mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, - MVPP2_PRS_PORT_LU_MAX, 0); - - mvpp2_prs_def_flow_init(priv); - - mvpp2_prs_mh_init(priv); - - mvpp2_prs_mac_init(priv); - - mvpp2_prs_dsa_init(priv); - - mvpp2_prs_vid_init(priv); - - err = mvpp2_prs_etype_init(priv); - if (err) - return err; - - err = mvpp2_prs_vlan_init(pdev, priv); - if (err) - return err; - - err = mvpp2_prs_pppoe_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip6_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip4_init(priv); - if (err) - return err; - - return 0; -} - -/* Compare MAC DA with tcam entry data */ -static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, - const u8 *da, unsigned char *mask) -{ - unsigned char tcam_byte, tcam_mask; - int index; - - for (index = 0; index < ETH_ALEN; index++) { - mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); - if (tcam_mask != mask[index]) - return false; - - if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) - return false; - } - - return true; -} - -/* Find tcam entry with matched pair */ -static int -mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, - unsigned char *mask, int udf_type) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through the all entires with MVPP2_PRS_LU_MAC */ - for (tid = MVPP2_PE_MAC_RANGE_START; - tid <= MVPP2_PE_MAC_RANGE_END; tid++) { - unsigned int entry_pmap; - - if (!priv->prs_shadow[tid].valid || - (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || - (priv->prs_shadow[tid].udf != udf_type)) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); - - if (mvpp2_prs_mac_range_equals(&pe, da, mask) && - entry_pmap == pmap) - return tid; - } - - return -ENOENT; -} - -/* Update parser's mac da entry */ -static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, - bool add) -{ - unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - struct mvpp2 *priv = port->priv; - unsigned int pmap, len, ri; - struct mvpp2_prs_entry pe; - int tid; - - memset(&pe, 0, sizeof(pe)); - - /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, - MVPP2_PRS_UDF_MAC_DEF); - - /* No such entry */ - if (tid < 0) { - if (!add) - return 0; - - /* Create new TCAM entry */ - /* Go through the all entries from first to last */ - tid = mvpp2_prs_tcam_first_free(priv, - MVPP2_PE_MAC_RANGE_START, - MVPP2_PE_MAC_RANGE_END); - if (tid < 0) - return tid; - - pe.index = tid; - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, 
port->id, add); - - /* Invalidate the entry if no ports are left enabled */ - pmap = mvpp2_prs_tcam_port_map_get(&pe); - if (pmap == 0) { - if (add) - return -EINVAL; - - mvpp2_prs_hw_inv(priv, pe.index); - priv->prs_shadow[pe.index].valid = false; - return 0; - } - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); - - /* Set match on DA */ - len = ETH_ALEN; - while (len--) - mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); - - /* Set result info bits */ - if (is_broadcast_ether_addr(da)) { - ri = MVPP2_PRS_RI_L2_BCAST; - } else if (is_multicast_ether_addr(da)) { - ri = MVPP2_PRS_RI_L2_MCAST; - } else { - ri = MVPP2_PRS_RI_L2_UCAST; - - if (ether_addr_equal(da, port->dev->dev_addr)) - ri |= MVPP2_PRS_RI_MAC_ME_MASK; - } - - mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | - MVPP2_PRS_RI_MAC_ME_MASK); - mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | - MVPP2_PRS_RI_MAC_ME_MASK); - - /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Update shadow table and hw entry */ - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) -{ - struct mvpp2_port *port = netdev_priv(dev); - int err; - - /* Remove old parser entry */ - err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); - if (err) - return err; - - /* Add new parser entry */ - err = mvpp2_prs_mac_da_accept(port, da, true); - if (err) - return err; - - /* Set addr in the device */ - ether_addr_copy(dev->dev_addr, da); - - return 0; -} - -static void mvpp2_prs_mac_del_all(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - struct mvpp2_prs_entry pe; - unsigned long pmap; - int index, tid; - - for (tid = MVPP2_PE_MAC_RANGE_START; - tid <= MVPP2_PE_MAC_RANGE_END; tid++) { - unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; - - if (!priv->prs_shadow[tid].valid || - (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || - (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - - pmap = mvpp2_prs_tcam_port_map_get(&pe); - - /* We only want entries active on this port */ - if (!test_bit(port->id, &pmap)) - continue; - - /* Read mac addr from entry */ - for (index = 0; index < ETH_ALEN; index++) - mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], - &da_mask[index]); - - /* Special cases : Don't remove broadcast and port's own - * address - */ - if (is_broadcast_ether_addr(da) || - ether_addr_equal(da, port->dev->dev_addr)) - continue; - - /* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); - } -} - -static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) -{ - switch (type) { - case MVPP2_TAG_TYPE_EDSA: - /* Add port to EDSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - /* Remove port from DSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - break; - - case MVPP2_TAG_TYPE_DSA: - /* Add port to DSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - /* Remove 
port from EDSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - break; - - case MVPP2_TAG_TYPE_MH: - case MVPP2_TAG_TYPE_NONE: - /* Remove port form EDSA and DSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - break; - - default: - if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) - return -EINVAL; - } - - return 0; -} - -/* Set prs flow for the port */ -static int mvpp2_prs_def_flow(struct mvpp2_port *port) -{ - struct mvpp2_prs_entry pe; - int tid; - - memset(&pe, 0, sizeof(pe)); - - tid = mvpp2_prs_flow_find(port->priv, port->id); - - /* Such entry not exist */ - if (tid < 0) { - /* Go through the all entires from last to first */ - tid = mvpp2_prs_tcam_first_free(port->priv, - MVPP2_PE_LAST_FREE_TID, - MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - - /* Set flow ID*/ - mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); - - /* Update shadow table */ - mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); - } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); - } - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); - mvpp2_prs_hw_write(port->priv, &pe); - - return 0; -} - -/* Classifier configuration routines */ - -/* Update classification flow table registers */ -static void mvpp2_cls_flow_write(struct mvpp2 *priv, - struct mvpp2_cls_flow_entry *fe) -{ - mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); -} - -/* Update classification lookup table register */ -static void mvpp2_cls_lookup_write(struct mvpp2 *priv, - struct mvpp2_cls_lookup_entry *le) -{ - u32 val; - - val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; - mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); - mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); -} - -/* Classifier default initialization */ -static void mvpp2_cls_init(struct mvpp2 *priv) -{ - struct mvpp2_cls_lookup_entry le; - struct mvpp2_cls_flow_entry fe; - int index; - - /* Enable classifier */ - mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); - - /* Clear classifier flow table */ - memset(&fe.data, 0, sizeof(fe.data)); - for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { - fe.index = index; - mvpp2_cls_flow_write(priv, &fe); - } - - /* Clear classifier lookup table */ - le.data = 0; - for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { - le.lkpid = index; - le.way = 0; - mvpp2_cls_lookup_write(priv, &le); - - le.way = 1; - mvpp2_cls_lookup_write(priv, &le); - } -} - -static void mvpp2_cls_port_config(struct mvpp2_port *port) -{ - struct mvpp2_cls_lookup_entry le; - u32 val; - - /* Set way for the port */ - val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); - val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); - mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); - - /* Pick the entry to be accessed in lookup ID decoding table - * according to the way and lkpid. 
- */ - le.lkpid = port->id; - le.way = 0; - le.data = 0; - - /* Set initial CPU queue for receiving packets */ - le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; - le.data |= port->first_rxq; - - /* Disable classification engines */ - le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; - - /* Update lookup ID table entry */ - mvpp2_cls_lookup_write(port->priv, &le); -} - -/* Set CPU queue number for oversize packets */ -static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) -{ - u32 val; - - mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), - port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); - - mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), - (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); - - val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); - val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); - mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); -} - -static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) -{ - if (likely(pool->frag_size <= PAGE_SIZE)) - return netdev_alloc_frag(pool->frag_size); - else - return kmalloc(pool->frag_size, GFP_ATOMIC); -} - -static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) -{ - if (likely(pool->frag_size <= PAGE_SIZE)) - skb_free_frag(data); - else - kfree(data); -} - -/* Buffer Manager configuration routines */ - -/* Create pool */ -static int mvpp2_bm_pool_create(struct platform_device *pdev, - struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool, int size) -{ - u32 val; - - /* Number of buffer pointers must be a multiple of 16, as per - * hardware constraints - */ - if (!IS_ALIGNED(size, 16)) - return -EINVAL; - - /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16 - * bytes per buffer pointer - */ - if (priv->hw_version == MVPP21) - bm_pool->size_bytes = 2 * sizeof(u32) * size; - else - bm_pool->size_bytes = 2 * sizeof(u64) * size; - - bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes, - &bm_pool->dma_addr, - GFP_KERNEL); - if (!bm_pool->virt_addr) - return -ENOMEM; - - if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, - MVPP2_BM_POOL_PTR_ALIGN)) { - dma_free_coherent(&pdev->dev, bm_pool->size_bytes, - bm_pool->virt_addr, bm_pool->dma_addr); - dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", - bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); - return -ENOMEM; - } - - mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), - lower_32_bits(bm_pool->dma_addr)); - mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); - - val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); - val |= MVPP2_BM_START_MASK; - mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); - - bm_pool->size = size; - bm_pool->pkt_size = 0; - bm_pool->buf_num = 0; - - return 0; -} - -/* Set pool buffer size */ -static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool, - int buf_size) -{ - u32 val; - - bm_pool->buf_size = buf_size; - - val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); - mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); -} - -static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool, - dma_addr_t *dma_addr, - phys_addr_t *phys_addr) -{ - int cpu = get_cpu(); - - *dma_addr = mvpp2_percpu_read(priv, cpu, - MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); - *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); - - if (priv->hw_version == MVPP22) { - u32 val; - u32 dma_addr_highbits, phys_addr_highbits; - - val = mvpp2_percpu_read(priv, cpu, 
MVPP22_BM_ADDR_HIGH_ALLOC); - dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); - phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> - MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; - - if (sizeof(dma_addr_t) == 8) - *dma_addr |= (u64)dma_addr_highbits << 32; - - if (sizeof(phys_addr_t) == 8) - *phys_addr |= (u64)phys_addr_highbits << 32; - } - - put_cpu(); -} - -/* Free all buffers from the pool */ -static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool, int buf_num) -{ - int i; - - if (buf_num > bm_pool->buf_num) { - WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n", - bm_pool->id, buf_num); - buf_num = bm_pool->buf_num; - } - - for (i = 0; i < buf_num; i++) { - dma_addr_t buf_dma_addr; - phys_addr_t buf_phys_addr; - void *data; - - mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, - &buf_dma_addr, &buf_phys_addr); - - dma_unmap_single(dev, buf_dma_addr, - bm_pool->buf_size, DMA_FROM_DEVICE); - - data = (void *)phys_to_virt(buf_phys_addr); - if (!data) - break; - - mvpp2_frag_free(bm_pool, data); - } - - /* Update BM driver with number of buffers removed from pool */ - bm_pool->buf_num -= i; -} - -/* Check number of buffers in BM pool */ -static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) -{ - int buf_num = 0; - - buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) & - MVPP22_BM_POOL_PTRS_NUM_MASK; - buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) & - MVPP2_BM_BPPI_PTR_NUM_MASK; - - /* HW has one buffer ready which is not reflected in the counters */ - if (buf_num) - buf_num += 1; - - return buf_num; -} - -/* Cleanup pool */ -static int mvpp2_bm_pool_destroy(struct platform_device *pdev, - struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool) -{ - int buf_num; - u32 val; - - buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); - mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num); - - /* Check buffer counters after free */ - buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); - if (buf_num) { - WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n", - bm_pool->id, bm_pool->buf_num); - return 0; - } - - val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); - val |= MVPP2_BM_STOP_MASK; - mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); - - dma_free_coherent(&pdev->dev, bm_pool->size_bytes, - bm_pool->virt_addr, - bm_pool->dma_addr); - return 0; -} - -static int mvpp2_bm_pools_init(struct platform_device *pdev, - struct mvpp2 *priv) -{ - int i, err, size; - struct mvpp2_bm_pool *bm_pool; - - /* Create all pools with maximum size */ - size = MVPP2_BM_POOL_SIZE_MAX; - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { - bm_pool = &priv->bm_pools[i]; - bm_pool->id = i; - err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size); - if (err) - goto err_unroll_pools; - mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); - } - return 0; - -err_unroll_pools: - dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); - for (i = i - 1; i >= 0; i--) - mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]); - return err; -} - -static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) -{ - int i, err; - - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { - /* Mask BM all interrupts */ - mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); - /* Clear BM cause register */ - mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); - } - - /* Allocate and initialize BM pools */ - priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM, - 
sizeof(*priv->bm_pools), GFP_KERNEL); - if (!priv->bm_pools) - return -ENOMEM; - - err = mvpp2_bm_pools_init(pdev, priv); - if (err < 0) - return err; - return 0; -} - -static void mvpp2_setup_bm_pool(void) -{ - /* Short pool */ - mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; - mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; - - /* Long pool */ - mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; - mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; - - /* Jumbo pool */ - mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; - mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; -} - -/* Attach long pool to rxq */ -static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, - int lrxq, int long_pool) -{ - u32 val, mask; - int prxq; - - /* Get queue physical ID */ - prxq = port->rxqs[lrxq]->id; - - if (port->priv->hw_version == MVPP21) - mask = MVPP21_RXQ_POOL_LONG_MASK; - else - mask = MVPP22_RXQ_POOL_LONG_MASK; - - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); - val &= ~mask; - val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); -} - -/* Attach short pool to rxq */ -static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, - int lrxq, int short_pool) -{ - u32 val, mask; - int prxq; - - /* Get queue physical ID */ - prxq = port->rxqs[lrxq]->id; - - if (port->priv->hw_version == MVPP21) - mask = MVPP21_RXQ_POOL_SHORT_MASK; - else - mask = MVPP22_RXQ_POOL_SHORT_MASK; - - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); - val &= ~mask; - val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); -} - -static void *mvpp2_buf_alloc(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, - dma_addr_t *buf_dma_addr, - phys_addr_t *buf_phys_addr, - gfp_t gfp_mask) -{ - dma_addr_t dma_addr; - void *data; - - data = mvpp2_frag_alloc(bm_pool); - if (!data) - return NULL; - - dma_addr = dma_map_single(port->dev->dev.parent, data, - MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { - mvpp2_frag_free(bm_pool, data); - return NULL; - } - *buf_dma_addr = dma_addr; - *buf_phys_addr = virt_to_phys(data); - - return data; -} - -/* Release buffer to BM */ -static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, - dma_addr_t buf_dma_addr, - phys_addr_t buf_phys_addr) -{ - int cpu = get_cpu(); - - if (port->priv->hw_version == MVPP22) { - u32 val = 0; - - if (sizeof(dma_addr_t) == 8) - val |= upper_32_bits(buf_dma_addr) & - MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; - - if (sizeof(phys_addr_t) == 8) - val |= (upper_32_bits(buf_phys_addr) - << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & - MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; - - mvpp2_percpu_write_relaxed(port->priv, cpu, - MVPP22_BM_ADDR_HIGH_RLS_REG, val); - } - - /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply - * returned in the "cookie" field of the RX - * descriptor. 
Instead of storing the virtual address, we - * store the physical address - */ - mvpp2_percpu_write_relaxed(port->priv, cpu, - MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); - mvpp2_percpu_write_relaxed(port->priv, cpu, - MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); - - put_cpu(); -} - -/* Allocate buffers for the pool */ -static int mvpp2_bm_bufs_add(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, int buf_num) -{ - int i, buf_size, total_size; - dma_addr_t dma_addr; - phys_addr_t phys_addr; - void *buf; - - buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); - total_size = MVPP2_RX_TOTAL_SIZE(buf_size); - - if (buf_num < 0 || - (buf_num + bm_pool->buf_num > bm_pool->size)) { - netdev_err(port->dev, - "cannot allocate %d buffers for pool %d\n", - buf_num, bm_pool->id); - return 0; - } - - for (i = 0; i < buf_num; i++) { - buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, - &phys_addr, GFP_KERNEL); - if (!buf) - break; - - mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, - phys_addr); - } - - /* Update BM driver with number of buffers added to pool */ - bm_pool->buf_num += i; - - netdev_dbg(port->dev, - "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", - bm_pool->id, bm_pool->pkt_size, buf_size, total_size); - - netdev_dbg(port->dev, - "pool %d: %d of %d buffers added\n", - bm_pool->id, i, buf_num); - return i; -} - -/* Notify the driver that BM pool is being used as specific type and return the - * pool pointer on success - */ -static struct mvpp2_bm_pool * -mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) -{ - struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; - int num; - - if (pool >= MVPP2_BM_POOLS_NUM) { - netdev_err(port->dev, "Invalid pool %d\n", pool); - return NULL; - } - - /* Allocate buffers in case BM pool is used as long pool, but packet - * size doesn't match MTU or BM pool hasn't being used yet - */ - if (new_pool->pkt_size == 0) { - int pkts_num; - - /* Set default buffer number or free all the buffers in case - * the pool is not empty - */ - pkts_num = new_pool->buf_num; - if (pkts_num == 0) - pkts_num = mvpp2_pools[pool].buf_num; - else - mvpp2_bm_bufs_free(port->dev->dev.parent, - port->priv, new_pool, pkts_num); - - new_pool->pkt_size = pkt_size; - new_pool->frag_size = - SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + - MVPP2_SKB_SHINFO_SIZE; - - /* Allocate buffers for this pool */ - num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); - if (num != pkts_num) { - WARN(1, "pool %d: %d of %d allocated\n", - new_pool->id, num, pkts_num); - return NULL; - } - } - - mvpp2_bm_pool_bufsize_set(port->priv, new_pool, - MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); - - return new_pool; -} - -/* Initialize pools for swf */ -static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) -{ - int rxq; - enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; - - /* If port pkt_size is higher than 1518B: - * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool - * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool - */ - if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { - long_log_pool = MVPP2_BM_JUMBO; - short_log_pool = MVPP2_BM_LONG; - } else { - long_log_pool = MVPP2_BM_LONG; - short_log_pool = MVPP2_BM_SHORT; - } - - if (!port->pool_long) { - port->pool_long = - mvpp2_bm_pool_use(port, long_log_pool, - mvpp2_pools[long_log_pool].pkt_size); - if (!port->pool_long) - return -ENOMEM; - - port->pool_long->port_map |= BIT(port->id); - - for (rxq = 0; rxq < port->nrxqs; rxq++) - mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); - } - - if 
(!port->pool_short) { - port->pool_short = - mvpp2_bm_pool_use(port, short_log_pool, - mvpp2_pools[short_log_pool].pkt_size); - if (!port->pool_short) - return -ENOMEM; - - port->pool_short->port_map |= BIT(port->id); - - for (rxq = 0; rxq < port->nrxqs; rxq++) - mvpp2_rxq_short_pool_set(port, rxq, - port->pool_short->id); - } - - return 0; -} - -static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) -{ - struct mvpp2_port *port = netdev_priv(dev); - enum mvpp2_bm_pool_log_num new_long_pool; - int pkt_size = MVPP2_RX_PKT_SIZE(mtu); - - /* If port MTU is higher than 1518B: - * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool - * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool - */ - if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) - new_long_pool = MVPP2_BM_JUMBO; - else - new_long_pool = MVPP2_BM_LONG; - - if (new_long_pool != port->pool_long->id) { - /* Remove port from old short & long pool */ - port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id, - port->pool_long->pkt_size); - port->pool_long->port_map &= ~BIT(port->id); - port->pool_long = NULL; - - port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id, - port->pool_short->pkt_size); - port->pool_short->port_map &= ~BIT(port->id); - port->pool_short = NULL; - - port->pkt_size = pkt_size; - - /* Add port to new short & long pool */ - mvpp2_swf_bm_pool_init(port); - - /* Update L4 checksum when jumbo enable/disable on port */ - if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { - dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - dev->hw_features &= ~(NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM); - } else { - dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - } - } - - dev->mtu = mtu; - dev->wanted_features = dev->features; - - netdev_update_features(dev); - return 0; -} - -static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) -{ - int i, sw_thread_mask = 0; - - for (i = 0; i < port->nqvecs; i++) - sw_thread_mask |= port->qvecs[i].sw_thread_mask; - - mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); -} - -static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) -{ - int i, sw_thread_mask = 0; - - for (i = 0; i < port->nqvecs; i++) - sw_thread_mask |= port->qvecs[i].sw_thread_mask; - - mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); -} - -static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) -{ - struct mvpp2_port *port = qvec->port; - - mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); -} - -static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) -{ - struct mvpp2_port *port = qvec->port; - - mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); -} - -/* Mask the current CPU's Rx/Tx interrupts - * Called by on_each_cpu(), guaranteed to run with migration disabled, - * using smp_processor_id() is OK. - */ -static void mvpp2_interrupts_mask(void *arg) -{ - struct mvpp2_port *port = arg; - - mvpp2_percpu_write(port->priv, smp_processor_id(), - MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); -} - -/* Unmask the current CPU's Rx/Tx interrupts. - * Called by on_each_cpu(), guaranteed to run with migration disabled, - * using smp_processor_id() is OK. 
- */ -static void mvpp2_interrupts_unmask(void *arg) -{ - struct mvpp2_port *port = arg; - u32 val; - - val = MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; - if (port->has_tx_irqs) - val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - - mvpp2_percpu_write(port->priv, smp_processor_id(), - MVPP2_ISR_RX_TX_MASK_REG(port->id), val); -} - -static void -mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) -{ - u32 val; - int i; - - if (port->priv->hw_version != MVPP22) - return; - - if (mask) - val = 0; - else - val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; - - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *v = port->qvecs + i; - - if (v->type != MVPP2_QUEUE_VECTOR_SHARED) - continue; - - mvpp2_percpu_write(port->priv, v->sw_thread_id, - MVPP2_ISR_RX_TX_MASK_REG(port->id), val); - } -} - -/* Port configuration routines */ - -static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - u32 val; - - regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); - val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; - regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); - - regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); - if (port->gop_id == 2) - val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII; - else if (port->gop_id == 3) - val |= GENCONF_CTRL0_PORT1_RGMII_MII; - regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); -} - -static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - u32 val; - - regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); - val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | - GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; - regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); - - if (port->gop_id > 1) { - regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); - if (port->gop_id == 2) - val &= ~GENCONF_CTRL0_PORT0_RGMII; - else if (port->gop_id == 3) - val &= ~GENCONF_CTRL0_PORT1_RGMII_MII; - regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); - } -} - -static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); - void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); - u32 val; - - /* XPCS */ - val = readl(xpcs + MVPP22_XPCS_CFG0); - val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | - MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); - val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); - writel(val, xpcs + MVPP22_XPCS_CFG0); - - /* MPCS */ - val = readl(mpcs + MVPP22_MPCS_CTRL); - val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; - writel(val, mpcs + MVPP22_MPCS_CTRL); - - val = readl(mpcs + MVPP22_MPCS_CLK_RESET); - val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC | - MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); - val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); - writel(val, mpcs + MVPP22_MPCS_CLK_RESET); - - val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; - val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; - writel(val, mpcs + MVPP22_MPCS_CLK_RESET); -} - -static int mvpp22_gop_init(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - u32 val; - - if (!priv->sysctrl_base) - return 0; - - switch (port->phy_interface) { - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - if (port->gop_id == 0) - goto invalid_conf; - mvpp22_gop_init_rgmii(port); - break; - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_1000BASEX: - case 
PHY_INTERFACE_MODE_2500BASEX: - mvpp22_gop_init_sgmii(port); - break; - case PHY_INTERFACE_MODE_10GKR: - if (port->gop_id != 0) - goto invalid_conf; - mvpp22_gop_init_10gkr(port); - break; - default: - goto unsupported_conf; - } - - regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); - val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | - GENCONF_PORT_CTRL1_EN(port->gop_id); - regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); - - regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); - val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; - regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); - - regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); - val |= GENCONF_SOFT_RESET1_GOP; - regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); - -unsupported_conf: - return 0; - -invalid_conf: - netdev_err(port->dev, "Invalid port configuration\n"); - return -EINVAL; -} - -static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) -{ - u32 val; - - if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { - /* Enable the GMAC link status irq for this port */ - val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); - val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; - writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); - } - - if (port->gop_id == 0) { - /* Enable the XLG/GIG irqs for this port */ - val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); - if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) - val |= MVPP22_XLG_EXT_INT_MASK_XLG; - else - val |= MVPP22_XLG_EXT_INT_MASK_GIG; - writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); - } -} - -static void mvpp22_gop_mask_irq(struct mvpp2_port *port) -{ - u32 val; - - if (port->gop_id == 0) { - val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); - val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | - MVPP22_XLG_EXT_INT_MASK_GIG); - writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); - } - - if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { - val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); - val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; - writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); - } -} - -static void mvpp22_gop_setup_irq(struct mvpp2_port *port) -{ - u32 val; - - if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { - val = readl(port->base + MVPP22_GMAC_INT_MASK); - val |= MVPP22_GMAC_INT_MASK_LINK_STAT; - writel(val, port->base + MVPP22_GMAC_INT_MASK); - } - - if (port->gop_id == 0) { - val = readl(port->base + MVPP22_XLG_INT_MASK); - val |= MVPP22_XLG_INT_MASK_LINK; - writel(val, port->base + MVPP22_XLG_INT_MASK); - } - - mvpp22_gop_unmask_irq(port); -} - -/* Sets the PHY mode of the COMPHY (which configures the serdes lanes). - * - * The PHY mode used by the PPv2 driver comes from the network subsystem, while - * the one given to the COMPHY comes from the generic PHY subsystem. Hence they - * differ. - * - * The COMPHY configures the serdes lanes regardless of the actual use of the - * lanes by the physical layer. This is why configurations like - * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid. 
- */ -static int mvpp22_comphy_init(struct mvpp2_port *port) -{ - enum phy_mode mode; - int ret; - - if (!port->comphy) - return 0; - - switch (port->phy_interface) { - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_1000BASEX: - mode = PHY_MODE_SGMII; - break; - case PHY_INTERFACE_MODE_2500BASEX: - mode = PHY_MODE_2500SGMII; - break; - case PHY_INTERFACE_MODE_10GKR: - mode = PHY_MODE_10GKR; - break; - default: - return -EINVAL; - } - - ret = phy_set_mode(port->comphy, mode); - if (ret) - return ret; - - return phy_power_on(port->comphy); -} - -static void mvpp2_port_enable(struct mvpp2_port *port) -{ - u32 val; - - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { - val = readl(port->base + MVPP22_XLG_CTRL0_REG); - val |= MVPP22_XLG_CTRL0_PORT_EN | - MVPP22_XLG_CTRL0_MAC_RESET_DIS; - val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; - writel(val, port->base + MVPP22_XLG_CTRL0_REG); - } else { - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val |= MVPP2_GMAC_PORT_EN_MASK; - val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); - } -} - -static void mvpp2_port_disable(struct mvpp2_port *port) -{ - u32 val; - - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { - val = readl(port->base + MVPP22_XLG_CTRL0_REG); - val &= ~MVPP22_XLG_CTRL0_PORT_EN; - writel(val, port->base + MVPP22_XLG_CTRL0_REG); - - /* Disable & reset should be done separately */ - val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; - writel(val, port->base + MVPP22_XLG_CTRL0_REG); - } else { - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val &= ~(MVPP2_GMAC_PORT_EN_MASK); - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); - } -} - -/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ -static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) & - ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); -} - -/* Configure loopback port */ -static void mvpp2_port_loopback_set(struct mvpp2_port *port, - const struct phylink_link_state *state) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); - - if (state->speed == 1000) - val |= MVPP2_GMAC_GMII_LB_EN_MASK; - else - val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; - - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) - val |= MVPP2_GMAC_PCS_LB_EN_MASK; - else - val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; - - writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); -} - -struct mvpp2_ethtool_counter { - unsigned int offset; - const char string[ETH_GSTRING_LEN]; - bool reg_is_64b; -}; - -static u64 mvpp2_read_count(struct mvpp2_port *port, - const struct mvpp2_ethtool_counter *counter) -{ - u64 val; - - val = readl(port->stats_base + counter->offset); - if (counter->reg_is_64b) - val += (u64)readl(port->stats_base + counter->offset + 4) << 32; - - return val; -} - -/* Due to the fact that software statistics and hardware statistics are, by - * design, incremented at different moments in the chain of packet processing, - * it is very likely that incoming packets could have been dropped after being - * counted by hardware but before reaching software statistics (most probably - * multicast packets), 
and in the oppposite way, during transmission, FCS bytes - * are added in between as well as TSO skb will be split and header bytes added. - * Hence, statistics gathered from userspace with ifconfig (software) and - * ethtool (hardware) cannot be compared. - */ -static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = { - { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, - { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, - { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, - { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, - { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, - { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, - { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, - { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, - { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, - { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, - { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, - { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, - { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, - { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, - { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, - { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, - { MVPP2_MIB_FC_SENT, "fc_sent" }, - { MVPP2_MIB_FC_RCVD, "fc_received" }, - { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, - { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, - { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, - { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, - { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, - { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, - { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, - { MVPP2_MIB_COLLISION, "collision" }, - { MVPP2_MIB_LATE_COLLISION, "late_collision" }, -}; - -static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, - u8 *data) -{ - if (sset == ETH_SS_STATS) { - int i; - - for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) - memcpy(data + i * ETH_GSTRING_LEN, - &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); - } -} - -static void mvpp2_gather_hw_statistics(struct work_struct *work) -{ - struct delayed_work *del_work = to_delayed_work(work); - struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, - stats_work); - u64 *pstats; - int i; - - mutex_lock(&port->gather_stats_lock); - - pstats = port->ethtool_stats; - for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) - *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); - - /* No need to read again the counters right after this function if it - * was called asynchronously by the user (ie. use of ethtool). - */ - cancel_delayed_work(&port->stats_work); - queue_delayed_work(port->priv->stats_queue, &port->stats_work, - MVPP2_MIB_COUNTERS_STATS_DELAY); - - mutex_unlock(&port->gather_stats_lock); -} - -static void mvpp2_ethtool_get_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct mvpp2_port *port = netdev_priv(dev); - - /* Update statistics for the given port, then take the lock to avoid - * concurrent accesses on the ethtool_stats structure during its copy. 
- */ - mvpp2_gather_hw_statistics(&port->stats_work.work); - - mutex_lock(&port->gather_stats_lock); - memcpy(data, port->ethtool_stats, - sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs)); - mutex_unlock(&port->gather_stats_lock); -} - -static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) -{ - if (sset == ETH_SS_STATS) - return ARRAY_SIZE(mvpp2_ethtool_regs); - - return -EOPNOTSUPP; -} - -static void mvpp2_port_reset(struct mvpp2_port *port) -{ - u32 val; - unsigned int i; - - /* Read the GOP statistics to reset the hardware counters */ - for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) - mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); - - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - ~MVPP2_GMAC_PORT_RESET_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - - while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - MVPP2_GMAC_PORT_RESET_MASK) - continue; -} - -/* Change maximum receive size of the port */ -static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; - val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << - MVPP2_GMAC_MAX_RX_SIZE_OFFS); - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); -} - -/* Change maximum receive size of the port */ -static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) -{ - u32 val; - - val = readl(port->base + MVPP22_XLG_CTRL1_REG); - val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; - val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << - MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; - writel(val, port->base + MVPP22_XLG_CTRL1_REG); -} - -/* Set defaults to the MVPP2 port */ -static void mvpp2_defaults_set(struct mvpp2_port *port) -{ - int tx_port_num, val, queue, ptxq, lrxq; - - if (port->priv->hw_version == MVPP21) { - /* Update TX FIFO MIN Threshold */ - val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); - val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; - /* Min. 
TX threshold must be less than minimal packet length */ - val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); - writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); - } - - /* Disable Legacy WRR, Disable EJP, Release from reset */ - tx_port_num = mvpp2_egress_port(port); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, - tx_port_num); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); - - /* Close bandwidth for all queues */ - for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { - ptxq = mvpp2_txq_phys(port->id, queue); - mvpp2_write(port->priv, - MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); - } - - /* Set refill period to 1 usec, refill tokens - * and bucket size to maximum - */ - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, - port->priv->tclk / USEC_PER_SEC); - val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); - val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; - val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); - val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; - mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); - val = MVPP2_TXP_TOKEN_SIZE_MAX; - mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); - - /* Set MaximumLowLatencyPacketSize value to 256 */ - mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), - MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | - MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); - - /* Enable Rx cache snoop */ - for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { - queue = port->rxqs[lrxq]->id; - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); - val |= MVPP2_SNOOP_PKT_SIZE_MASK | - MVPP2_SNOOP_BUF_HDR_MASK; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); - } - - /* At default, mask all interrupts to all present cpus */ - mvpp2_interrupts_disable(port); -} - -/* Enable/disable receiving packets */ -static void mvpp2_ingress_enable(struct mvpp2_port *port) -{ - u32 val; - int lrxq, queue; - - for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { - queue = port->rxqs[lrxq]->id; - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); - val &= ~MVPP2_RXQ_DISABLE_MASK; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); - } -} - -static void mvpp2_ingress_disable(struct mvpp2_port *port) -{ - u32 val; - int lrxq, queue; - - for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { - queue = port->rxqs[lrxq]->id; - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); - val |= MVPP2_RXQ_DISABLE_MASK; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); - } -} - -/* Enable transmit via physical egress queue - * - HW starts take descriptors from DRAM - */ -static void mvpp2_egress_enable(struct mvpp2_port *port) -{ - u32 qmap; - int queue; - int tx_port_num = mvpp2_egress_port(port); - - /* Enable all initialized TXs. 
*/ - qmap = 0; - for (queue = 0; queue < port->ntxqs; queue++) { - struct mvpp2_tx_queue *txq = port->txqs[queue]; - - if (txq->descs) - qmap |= (1 << queue); - } - - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); -} - -/* Disable transmit via physical egress queue - * - HW doesn't take descriptors from DRAM - */ -static void mvpp2_egress_disable(struct mvpp2_port *port) -{ - u32 reg_data; - int delay; - int tx_port_num = mvpp2_egress_port(port); - - /* Issue stop command for active channels only */ - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); - reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & - MVPP2_TXP_SCHED_ENQ_MASK; - if (reg_data != 0) - mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, - (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); - - /* Wait for all Tx activity to terminate. */ - delay = 0; - do { - if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { - netdev_warn(port->dev, - "Tx stop timed out, status=0x%08x\n", - reg_data); - break; - } - mdelay(1); - delay++; - - /* Check port TX Command register that all - * Tx queues are stopped - */ - reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); - } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); -} - -/* Rx descriptors helper methods */ - -/* Get number of Rx descriptors occupied by received packets */ -static inline int -mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) -{ - u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); - - return val & MVPP2_RXQ_OCCUPIED_MASK; -} - -/* Update Rx queue status with the number of occupied and available - * Rx descriptor slots. - */ -static inline void -mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, - int used_count, int free_count) -{ - /* Decrement the number of used descriptors and increment count - * increment the number of free descriptors. - */ - u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); - - mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); -} - -/* Get pointer to next RX descriptor to be processed by SW */ -static inline struct mvpp2_rx_desc * -mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) -{ - int rx_desc = rxq->next_desc_to_proc; - - rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); - prefetch(rxq->descs + rxq->next_desc_to_proc); - return rxq->descs + rx_desc; -} - -/* Set rx queue offset */ -static void mvpp2_rxq_offset_set(struct mvpp2_port *port, - int prxq, int offset) -{ - u32 val; - - /* Convert offset from bytes to units of 32 bytes */ - offset = offset >> 5; - - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); - val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; - - /* Offset is in */ - val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & - MVPP2_RXQ_PACKET_OFFSET_MASK); - - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); -} - -/* Tx descriptors helper methods */ - -/* Get pointer to next Tx descriptor to be processed (send) by HW */ -static struct mvpp2_tx_desc * -mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) -{ - int tx_desc = txq->next_desc_to_proc; - - txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); - return txq->descs + tx_desc; -} - -/* Update HW with number of aggregated Tx descriptors to be sent - * - * Called only from mvpp2_tx(), so migration is disabled, using - * smp_processor_id() is OK. 
- */ -static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) -{ - /* aggregated access - relevant TXQ number is written in TX desc */ - mvpp2_percpu_write(port->priv, smp_processor_id(), - MVPP2_AGGR_TXQ_UPDATE_REG, pending); -} - -/* Check if there are enough free descriptors in aggregated txq. - * If not, update the number of occupied descriptors and repeat the check. - * - * Called only from mvpp2_tx(), so migration is disabled, using - * smp_processor_id() is OK. - */ -static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, - struct mvpp2_tx_queue *aggr_txq, int num) -{ - if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { - /* Update number of occupied aggregated Tx descriptors */ - int cpu = smp_processor_id(); - u32 val = mvpp2_read_relaxed(priv, - MVPP2_AGGR_TXQ_STATUS_REG(cpu)); - - aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; - - if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) - return -ENOMEM; - } - return 0; -} - -/* Reserved Tx descriptors allocation request - * - * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called - * only by mvpp2_tx(), so migration is disabled, using - * smp_processor_id() is OK. - */ -static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, - struct mvpp2_tx_queue *txq, int num) -{ - u32 val; - int cpu = smp_processor_id(); - - val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; - mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); - - val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); - - return val & MVPP2_TXQ_RSVD_RSLT_MASK; -} - -/* Check if there are enough reserved descriptors for transmission. - * If not, request chunk of reserved descriptors and check again. - */ -static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, - struct mvpp2_tx_queue *txq, - struct mvpp2_txq_pcpu *txq_pcpu, - int num) -{ - int req, cpu, desc_count; - - if (txq_pcpu->reserved_num >= num) - return 0; - - /* Not enough descriptors reserved! Update the reserved descriptor - * count and check again. - */ - - desc_count = 0; - /* Compute total of used descriptors */ - for_each_present_cpu(cpu) { - struct mvpp2_txq_pcpu *txq_pcpu_aux; - - txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); - desc_count += txq_pcpu_aux->count; - desc_count += txq_pcpu_aux->reserved_num; - } - - req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); - desc_count += req; - - if (desc_count > - (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) - return -ENOMEM; - - txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); - - /* OK, the descriptor could have been updated: check again. */ - if (txq_pcpu->reserved_num < num) - return -ENOMEM; - return 0; -} - -/* Release the last allocated Tx descriptor. Useful to handle DMA - * mapping failures in the Tx path. 
- */ -static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) -{ - if (txq->next_desc_to_proc == 0) - txq->next_desc_to_proc = txq->last_desc - 1; - else - txq->next_desc_to_proc--; -} - -/* Set Tx descriptors fields relevant for CSUM calculation */ -static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, - int ip_hdr_len, int l4_proto) -{ - u32 command; - - /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, - * G_L4_chk, L4_type required only for checksum calculation - */ - command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); - command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); - command |= MVPP2_TXD_IP_CSUM_DISABLE; - - if (l3_proto == swab16(ETH_P_IP)) { - command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ - command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ - } else { - command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ - } - - if (l4_proto == IPPROTO_TCP) { - command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ - command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ - } else if (l4_proto == IPPROTO_UDP) { - command |= MVPP2_TXD_L4_UDP; /* enable UDP */ - command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ - } else { - command |= MVPP2_TXD_L4_CSUM_NOT; - } - - return command; -} - -/* Get number of sent descriptors and decrement counter. - * The number of sent descriptors is returned. - * Per-CPU access - * - * Called only from mvpp2_txq_done(), called from mvpp2_tx() - * (migration disabled) and from the TX completion tasklet (migration - * disabled) so using smp_processor_id() is OK. - */ -static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq) -{ - u32 val; - - /* Reading status reg resets transmitted descriptor counter */ - val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), - MVPP2_TXQ_SENT_REG(txq->id)); - - return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> - MVPP2_TRANSMITTED_COUNT_OFFSET; -} - -/* Called through on_each_cpu(), so runs on all CPUs, with migration - * disabled, therefore using smp_processor_id() is OK. 
- */ -static void mvpp2_txq_sent_counter_clear(void *arg) -{ - struct mvpp2_port *port = arg; - int queue; - - for (queue = 0; queue < port->ntxqs; queue++) { - int id = port->txqs[queue]->id; - - mvpp2_percpu_read(port->priv, smp_processor_id(), - MVPP2_TXQ_SENT_REG(id)); - } -} - -/* Set max sizes for Tx queues */ -static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) -{ - u32 val, size, mtu; - int txq, tx_port_num; - - mtu = port->pkt_size * 8; - if (mtu > MVPP2_TXP_MTU_MAX) - mtu = MVPP2_TXP_MTU_MAX; - - /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ - mtu = 3 * mtu; - - /* Indirect access to registers */ - tx_port_num = mvpp2_egress_port(port); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); - - /* Set MTU */ - val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); - val &= ~MVPP2_TXP_MTU_MAX; - val |= mtu; - mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); - - /* TXP token size and all TXQs token size must be larger that MTU */ - val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); - size = val & MVPP2_TXP_TOKEN_SIZE_MAX; - if (size < mtu) { - size = mtu; - val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; - val |= size; - mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); - } - - for (txq = 0; txq < port->ntxqs; txq++) { - val = mvpp2_read(port->priv, - MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); - size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; - - if (size < mtu) { - size = mtu; - val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; - val |= size; - mvpp2_write(port->priv, - MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), - val); - } - } -} - -/* Set the number of packets that will be received before Rx interrupt - * will be generated by HW. - */ -static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) -{ - int cpu = get_cpu(); - - if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) - rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; - - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, - rxq->pkts_coal); - - put_cpu(); -} - -/* For some reason in the LSP this is done on each CPU. Why ? */ -static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq) -{ - int cpu = get_cpu(); - u32 val; - - if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) - txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; - - val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); - - put_cpu(); -} - -static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) -{ - u64 tmp = (u64)clk_hz * usec; - - do_div(tmp, USEC_PER_SEC); - - return tmp > U32_MAX ? U32_MAX : tmp; -} - -static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) -{ - u64 tmp = (u64)cycles * USEC_PER_SEC; - - do_div(tmp, clk_hz); - - return tmp > U32_MAX ? 
U32_MAX : tmp; -} - -/* Set the time delay in usec before Rx interrupt */ -static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) -{ - unsigned long freq = port->priv->tclk; - u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); - - if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { - rxq->time_coal = - mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); - - /* re-evaluate to get actual register value */ - val = mvpp2_usec_to_cycles(rxq->time_coal, freq); - } - - mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); -} - -static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) -{ - unsigned long freq = port->priv->tclk; - u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); - - if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { - port->tx_time_coal = - mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); - - /* re-evaluate to get actual register value */ - val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); - } - - mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); -} - -/* Free Tx queue skbuffs */ -static void mvpp2_txq_bufs_free(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq, - struct mvpp2_txq_pcpu *txq_pcpu, int num) -{ - int i; - - for (i = 0; i < num; i++) { - struct mvpp2_txq_pcpu_buf *tx_buf = - txq_pcpu->buffs + txq_pcpu->txq_get_index; - - if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) - dma_unmap_single(port->dev->dev.parent, tx_buf->dma, - tx_buf->size, DMA_TO_DEVICE); - if (tx_buf->skb) - dev_kfree_skb_any(tx_buf->skb); - - mvpp2_txq_inc_get(txq_pcpu); - } -} - -static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, - u32 cause) -{ - int queue = fls(cause) - 1; - - return port->rxqs[queue]; -} - -static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, - u32 cause) -{ - int queue = fls(cause) - 1; - - return port->txqs[queue]; -} - -/* Handle end of transmission */ -static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, - struct mvpp2_txq_pcpu *txq_pcpu) -{ - struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); - int tx_done; - - if (txq_pcpu->cpu != smp_processor_id()) - netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); - - tx_done = mvpp2_txq_sent_desc_proc(port, txq); - if (!tx_done) - return; - mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); - - txq_pcpu->count -= tx_done; - - if (netif_tx_queue_stopped(nq)) - if (txq_pcpu->count <= txq_pcpu->wake_threshold) - netif_tx_wake_queue(nq); -} - -static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, - int cpu) -{ - struct mvpp2_tx_queue *txq; - struct mvpp2_txq_pcpu *txq_pcpu; - unsigned int tx_todo = 0; - - while (cause) { - txq = mvpp2_get_tx_queue(port, cause); - if (!txq) - break; - - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - - if (txq_pcpu->count) { - mvpp2_txq_done(port, txq, txq_pcpu); - tx_todo += txq_pcpu->count; - } - - cause &= ~(1 << txq->log_id); - } - return tx_todo; -} - -/* Rx/Tx queue initialization/cleanup methods */ - -/* Allocate and initialize descriptors for aggr TXQ */ -static int mvpp2_aggr_txq_init(struct platform_device *pdev, - struct mvpp2_tx_queue *aggr_txq, int cpu, - struct mvpp2 *priv) -{ - u32 txq_dma; - - /* Allocate memory for TX descriptors */ - aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, - &aggr_txq->descs_dma, GFP_KERNEL); - if (!aggr_txq->descs) - return -ENOMEM; - - aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; - - /* Aggr TXQ no reset WA */ - 
aggr_txq->next_desc_to_proc = mvpp2_read(priv, - MVPP2_AGGR_TXQ_INDEX_REG(cpu)); - - /* Set Tx descriptors queue starting address indirect - * access - */ - if (priv->hw_version == MVPP21) - txq_dma = aggr_txq->descs_dma; - else - txq_dma = aggr_txq->descs_dma >> - MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; - - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), - MVPP2_AGGR_TXQ_SIZE); - - return 0; -} - -/* Create a specified Rx queue */ -static int mvpp2_rxq_init(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) - -{ - u32 rxq_dma; - int cpu; - - rxq->size = port->rx_ring_size; - - /* Allocate memory for RX descriptors */ - rxq->descs = dma_alloc_coherent(port->dev->dev.parent, - rxq->size * MVPP2_DESC_ALIGNED_SIZE, - &rxq->descs_dma, GFP_KERNEL); - if (!rxq->descs) - return -ENOMEM; - - rxq->last_desc = rxq->size - 1; - - /* Zero occupied and non-occupied counters - direct access */ - mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - - /* Set Rx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - if (port->priv->hw_version == MVPP21) - rxq_dma = rxq->descs_dma; - else - rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); - put_cpu(); - - /* Set Offset */ - mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); - - /* Set coalescing pkts and time */ - mvpp2_rx_pkts_coal_set(port, rxq); - mvpp2_rx_time_coal_set(port, rxq); - - /* Add number of descriptors ready for receiving packets */ - mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); - - return 0; -} - -/* Push packets received by the RXQ to BM pool */ -static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) -{ - int rx_received, i; - - rx_received = mvpp2_rxq_received(port, rxq->id); - if (!rx_received) - return; - - for (i = 0; i < rx_received; i++) { - struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); - u32 status = mvpp2_rxdesc_status_get(port, rx_desc); - int pool; - - pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; - - mvpp2_bm_pool_put(port, pool, - mvpp2_rxdesc_dma_addr_get(port, rx_desc), - mvpp2_rxdesc_cookie_get(port, rx_desc)); - } - mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); -} - -/* Cleanup Rx queue */ -static void mvpp2_rxq_deinit(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) -{ - int cpu; - - mvpp2_rxq_drop_pkts(port, rxq); - - if (rxq->descs) - dma_free_coherent(port->dev->dev.parent, - rxq->size * MVPP2_DESC_ALIGNED_SIZE, - rxq->descs, - rxq->descs_dma); - - rxq->descs = NULL; - rxq->last_desc = 0; - rxq->next_desc_to_proc = 0; - rxq->descs_dma = 0; - - /* Clear Rx descriptors queue starting address and size; - * free descriptor number - */ - mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); - put_cpu(); -} - -/* Create and initialize a Tx queue */ -static int mvpp2_txq_init(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq) -{ - u32 val; - int cpu, desc, desc_per_txq, tx_port_num; - struct mvpp2_txq_pcpu *txq_pcpu; - - 
txq->size = port->tx_ring_size; - - /* Allocate memory for Tx descriptors */ - txq->descs = dma_alloc_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - &txq->descs_dma, GFP_KERNEL); - if (!txq->descs) - return -ENOMEM; - - txq->last_desc = txq->size - 1; - - /* Set Tx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, - txq->descs_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, - txq->size & MVPP2_TXQ_DESC_SIZE_MASK); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, - txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); - val &= ~MVPP2_TXQ_PENDING_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); - - /* Calculate base address in prefetch buffer. We reserve 16 descriptors - * for each existing TXQ. - * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT - * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS - */ - desc_per_txq = 16; - desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + - (txq->log_id * desc_per_txq); - - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, - MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | - MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); - put_cpu(); - - /* WRR / EJP configuration - indirect access */ - tx_port_num = mvpp2_egress_port(port); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); - - val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); - val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; - val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); - val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; - mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); - - val = MVPP2_TXQ_TOKEN_SIZE_MAX; - mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), - val); - - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - txq_pcpu->size = txq->size; - txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, - sizeof(*txq_pcpu->buffs), - GFP_KERNEL); - if (!txq_pcpu->buffs) - return -ENOMEM; - - txq_pcpu->count = 0; - txq_pcpu->reserved_num = 0; - txq_pcpu->txq_put_index = 0; - txq_pcpu->txq_get_index = 0; - txq_pcpu->tso_headers = NULL; - - txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; - txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; - - txq_pcpu->tso_headers = - dma_alloc_coherent(port->dev->dev.parent, - txq_pcpu->size * TSO_HEADER_SIZE, - &txq_pcpu->tso_headers_dma, - GFP_KERNEL); - if (!txq_pcpu->tso_headers) - return -ENOMEM; - } - - return 0; -} - -/* Free allocated TXQ resources */ -static void mvpp2_txq_deinit(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq) -{ - struct mvpp2_txq_pcpu *txq_pcpu; - int cpu; - - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - kfree(txq_pcpu->buffs); - - if (txq_pcpu->tso_headers) - dma_free_coherent(port->dev->dev.parent, - txq_pcpu->size * TSO_HEADER_SIZE, - txq_pcpu->tso_headers, - txq_pcpu->tso_headers_dma); - - txq_pcpu->tso_headers = NULL; - } - - if (txq->descs) - dma_free_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_dma); - - txq->descs = NULL; - txq->last_desc = 0; - txq->next_desc_to_proc = 0; - txq->descs_dma = 0; - - /* Set minimum bandwidth for disabled TXQs */ - mvpp2_write(port->priv, 
MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); - - /* Set Tx descriptors queue starting address and size */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); - put_cpu(); -} - -/* Cleanup Tx ports */ -static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) -{ - struct mvpp2_txq_pcpu *txq_pcpu; - int delay, pending, cpu; - u32 val; - - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); - val |= MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); - - /* The napi queue has been stopped so wait for all packets - * to be transmitted. - */ - delay = 0; - do { - if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { - netdev_warn(port->dev, - "port %d: cleaning queue %d timed out\n", - port->id, txq->log_id); - break; - } - mdelay(1); - delay++; - - pending = mvpp2_percpu_read(port->priv, cpu, - MVPP2_TXQ_PENDING_REG); - pending &= MVPP2_TXQ_PENDING_MASK; - } while (pending); - - val &= ~MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); - put_cpu(); - - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - - /* Release all packets */ - mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); - - /* Reset queue */ - txq_pcpu->count = 0; - txq_pcpu->txq_put_index = 0; - txq_pcpu->txq_get_index = 0; - } -} - -/* Cleanup all Tx queues */ -static void mvpp2_cleanup_txqs(struct mvpp2_port *port) -{ - struct mvpp2_tx_queue *txq; - int queue; - u32 val; - - val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); - - /* Reset Tx ports and delete Tx queues */ - val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); - mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); - - for (queue = 0; queue < port->ntxqs; queue++) { - txq = port->txqs[queue]; - mvpp2_txq_clean(port, txq); - mvpp2_txq_deinit(port, txq); - } - - on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); - - val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); - mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); -} - -/* Cleanup all Rx queues */ -static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) -{ - int queue; - - for (queue = 0; queue < port->nrxqs; queue++) - mvpp2_rxq_deinit(port, port->rxqs[queue]); -} - -/* Init all Rx queues for port */ -static int mvpp2_setup_rxqs(struct mvpp2_port *port) -{ - int queue, err; - - for (queue = 0; queue < port->nrxqs; queue++) { - err = mvpp2_rxq_init(port, port->rxqs[queue]); - if (err) - goto err_cleanup; - } - return 0; - -err_cleanup: - mvpp2_cleanup_rxqs(port); - return err; -} - -/* Init all tx queues for port */ -static int mvpp2_setup_txqs(struct mvpp2_port *port) -{ - struct mvpp2_tx_queue *txq; - int queue, err; - - for (queue = 0; queue < port->ntxqs; queue++) { - txq = port->txqs[queue]; - err = mvpp2_txq_init(port, txq); - if (err) - goto err_cleanup; - } - - if (port->has_tx_irqs) { - mvpp2_tx_time_coal_set(port); - for (queue = 0; queue < port->ntxqs; queue++) { - txq = port->txqs[queue]; - mvpp2_tx_pkts_coal_set(port, txq); - } - } - - on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); - return 0; - -err_cleanup: - mvpp2_cleanup_txqs(port); - return err; -} - -/* The callback for per-port interrupt */ -static irqreturn_t mvpp2_isr(int irq, void *dev_id) -{ - struct mvpp2_queue_vector *qv = dev_id; - - 
mvpp2_qvec_interrupt_disable(qv); - - napi_schedule(&qv->napi); - - return IRQ_HANDLED; -} - -/* Per-port interrupt for link status changes */ -static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) -{ - struct mvpp2_port *port = (struct mvpp2_port *)dev_id; - struct net_device *dev = port->dev; - bool event = false, link = false; - u32 val; - - mvpp22_gop_mask_irq(port); - - if (port->gop_id == 0 && - port->phy_interface == PHY_INTERFACE_MODE_10GKR) { - val = readl(port->base + MVPP22_XLG_INT_STAT); - if (val & MVPP22_XLG_INT_STAT_LINK) { - event = true; - val = readl(port->base + MVPP22_XLG_STATUS); - if (val & MVPP22_XLG_STATUS_LINK_UP) - link = true; - } - } else if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { - val = readl(port->base + MVPP22_GMAC_INT_STAT); - if (val & MVPP22_GMAC_INT_STAT_LINK) { - event = true; - val = readl(port->base + MVPP2_GMAC_STATUS0); - if (val & MVPP2_GMAC_STATUS0_LINK_UP) - link = true; - } - } - - if (port->phylink) { - phylink_mac_change(port->phylink, link); - goto handled; - } - - if (!netif_running(dev) || !event) - goto handled; - - if (link) { - mvpp2_interrupts_enable(port); - - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - netif_carrier_on(dev); - netif_tx_wake_all_queues(dev); - } else { - netif_tx_stop_all_queues(dev); - netif_carrier_off(dev); - mvpp2_ingress_disable(port); - mvpp2_egress_disable(port); - - mvpp2_interrupts_disable(port); - } - -handled: - mvpp22_gop_unmask_irq(port); - return IRQ_HANDLED; -} - -static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) -{ - ktime_t interval; - - if (!port_pcpu->timer_scheduled) { - port_pcpu->timer_scheduled = true; - interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; - hrtimer_start(&port_pcpu->tx_done_timer, interval, - HRTIMER_MODE_REL_PINNED); - } -} - -static void mvpp2_tx_proc_cb(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); - unsigned int tx_todo, cause; - - if (!netif_running(dev)) - return; - port_pcpu->timer_scheduled = false; - - /* Process all the Tx queues */ - cause = (1 << port->ntxqs) - 1; - tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); - - /* Set the timer in case not all the packets were processed */ - if (tx_todo) - mvpp2_timer_set(port_pcpu); -} - -static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) -{ - struct mvpp2_port_pcpu *port_pcpu = container_of(timer, - struct mvpp2_port_pcpu, - tx_done_timer); - - tasklet_schedule(&port_pcpu->tx_done_tasklet); - - return HRTIMER_NORESTART; -} - -/* Main RX/TX processing routines */ - -/* Display more error info */ -static void mvpp2_rx_error(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - u32 status = mvpp2_rxdesc_status_get(port, rx_desc); - size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); - char *err_str = NULL; - - switch (status & MVPP2_RXD_ERR_CODE_MASK) { - case MVPP2_RXD_ERR_CRC: - err_str = "crc"; - break; - case MVPP2_RXD_ERR_OVERRUN: - err_str = "overrun"; - break; - case MVPP2_RXD_ERR_RESOURCE: - err_str = "resource"; - break; - } - if (err_str && net_ratelimit()) - netdev_err(port->dev, - "bad rx status %08x (%s error), size=%zu\n", - status, err_str, sz); -} - -/* Handle RX checksum offload */ -static void mvpp2_rx_csum(struct mvpp2_port *port, 
u32 status, - struct sk_buff *skb) -{ - if (((status & MVPP2_RXD_L3_IP4) && - !(status & MVPP2_RXD_IP4_HEADER_ERR)) || - (status & MVPP2_RXD_L3_IP6)) - if (((status & MVPP2_RXD_L4_UDP) || - (status & MVPP2_RXD_L4_TCP)) && - (status & MVPP2_RXD_L4_CSUM_OK)) { - skb->csum = 0; - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - } - - skb->ip_summed = CHECKSUM_NONE; -} - -/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ -static int mvpp2_rx_refill(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, int pool) -{ - dma_addr_t dma_addr; - phys_addr_t phys_addr; - void *buf; - - /* No recycle or too many buffers are in use, so allocate a new skb */ - buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, - GFP_ATOMIC); - if (!buf) - return -ENOMEM; - - mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); - - return 0; -} - -/* Handle tx checksum */ -static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) -{ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - int ip_hdr_len = 0; - u8 l4_proto; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *ip4h = ip_hdr(skb); - - /* Calculate IPv4 checksum and L4 checksum */ - ip_hdr_len = ip4h->ihl; - l4_proto = ip4h->protocol; - } else if (skb->protocol == htons(ETH_P_IPV6)) { - struct ipv6hdr *ip6h = ipv6_hdr(skb); - - /* Read l4_protocol from one of IPv6 extra headers */ - if (skb_network_header_len(skb) > 0) - ip_hdr_len = (skb_network_header_len(skb) >> 2); - l4_proto = ip6h->nexthdr; - } else { - return MVPP2_TXD_L4_CSUM_NOT; - } - - return mvpp2_txq_desc_csum(skb_network_offset(skb), - skb->protocol, ip_hdr_len, l4_proto); - } - - return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; -} - -/* Main rx processing */ -static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, - int rx_todo, struct mvpp2_rx_queue *rxq) -{ - struct net_device *dev = port->dev; - int rx_received; - int rx_done = 0; - u32 rcvd_pkts = 0; - u32 rcvd_bytes = 0; - - /* Get number of received packets and clamp the to-do */ - rx_received = mvpp2_rxq_received(port, rxq->id); - if (rx_todo > rx_received) - rx_todo = rx_received; - - while (rx_done < rx_todo) { - struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); - struct mvpp2_bm_pool *bm_pool; - struct sk_buff *skb; - unsigned int frag_size; - dma_addr_t dma_addr; - phys_addr_t phys_addr; - u32 rx_status; - int pool, rx_bytes, err; - void *data; - - rx_done++; - rx_status = mvpp2_rxdesc_status_get(port, rx_desc); - rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); - rx_bytes -= MVPP2_MH_SIZE; - dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); - phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); - data = (void *)phys_to_virt(phys_addr); - - pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; - bm_pool = &port->priv->bm_pools[pool]; - - /* In case of an error, release the requested buffer pointer - * to the Buffer Manager. This request process is controlled - * by the hardware, and the information about the buffer is - * comprised by the RX descriptor. 
- */ - if (rx_status & MVPP2_RXD_ERR_SUMMARY) { -err_drop_frame: - dev->stats.rx_errors++; - mvpp2_rx_error(port, rx_desc); - /* Return the buffer to the pool */ - mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); - continue; - } - - if (bm_pool->frag_size > PAGE_SIZE) - frag_size = 0; - else - frag_size = bm_pool->frag_size; - - skb = build_skb(data, frag_size); - if (!skb) { - netdev_warn(port->dev, "skb build failed\n"); - goto err_drop_frame; - } - - err = mvpp2_rx_refill(port, bm_pool, pool); - if (err) { - netdev_err(port->dev, "failed to refill BM pools\n"); - goto err_drop_frame; - } - - dma_unmap_single(dev->dev.parent, dma_addr, - bm_pool->buf_size, DMA_FROM_DEVICE); - - rcvd_pkts++; - rcvd_bytes += rx_bytes; - - skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); - skb_put(skb, rx_bytes); - skb->protocol = eth_type_trans(skb, dev); - mvpp2_rx_csum(port, rx_status, skb); - - napi_gro_receive(napi, skb); - } - - if (rcvd_pkts) { - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); - - u64_stats_update_begin(&stats->syncp); - stats->rx_packets += rcvd_pkts; - stats->rx_bytes += rcvd_bytes; - u64_stats_update_end(&stats->syncp); - } - - /* Update Rx queue management counters */ - wmb(); - mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); - - return rx_todo; -} - -static inline void -tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, - struct mvpp2_tx_desc *desc) -{ - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); - - dma_addr_t buf_dma_addr = - mvpp2_txdesc_dma_addr_get(port, desc); - size_t buf_sz = - mvpp2_txdesc_size_get(port, desc); - if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) - dma_unmap_single(port->dev->dev.parent, buf_dma_addr, - buf_sz, DMA_TO_DEVICE); - mvpp2_txq_desc_put(txq); -} - -/* Handle tx fragmentation processing */ -static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, - struct mvpp2_tx_queue *aggr_txq, - struct mvpp2_tx_queue *txq) -{ - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); - struct mvpp2_tx_desc *tx_desc; - int i; - dma_addr_t buf_dma_addr; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - void *addr = page_address(frag->page.p) + frag->page_offset; - - tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, frag->size); - - buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, - frag->size, DMA_TO_DEVICE); - if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { - mvpp2_txq_desc_put(txq); - goto cleanup; - } - - mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); - - if (i == (skb_shinfo(skb)->nr_frags - 1)) { - /* Last descriptor */ - mvpp2_txdesc_cmd_set(port, tx_desc, - MVPP2_TXD_L_DESC); - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); - } else { - /* Descriptor in the middle: Not First, Not Last */ - mvpp2_txdesc_cmd_set(port, tx_desc, 0); - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); - } - } - - return 0; -cleanup: - /* Release all descriptors that were used to map fragments of - * this packet, as well as the corresponding DMA mappings - */ - for (i = i - 1; i >= 0; i--) { - tx_desc = txq->descs + i; - tx_desc_unmap_put(port, txq, tx_desc); - } - - return -ENOMEM; -} - -static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, - struct net_device *dev, - struct mvpp2_tx_queue *txq, - struct mvpp2_tx_queue *aggr_txq, - struct mvpp2_txq_pcpu *txq_pcpu, - int hdr_sz) -{ - struct mvpp2_port *port = netdev_priv(dev); 
- struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - dma_addr_t addr; - - mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); - - addr = txq_pcpu->tso_headers_dma + - txq_pcpu->txq_put_index * TSO_HEADER_SIZE; - mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); - - mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | - MVPP2_TXD_F_DESC | - MVPP2_TXD_PADDING_DISABLE); - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); -} - -static inline int mvpp2_tso_put_data(struct sk_buff *skb, - struct net_device *dev, struct tso_t *tso, - struct mvpp2_tx_queue *txq, - struct mvpp2_tx_queue *aggr_txq, - struct mvpp2_txq_pcpu *txq_pcpu, - int sz, bool left, bool last) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - dma_addr_t buf_dma_addr; - - mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, sz); - - buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { - mvpp2_txq_desc_put(txq); - return -ENOMEM; - } - - mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); - - if (!left) { - mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); - if (last) { - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); - return 0; - } - } else { - mvpp2_txdesc_cmd_set(port, tx_desc, 0); - } - - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); - return 0; -} - -static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, - struct mvpp2_tx_queue *txq, - struct mvpp2_tx_queue *aggr_txq, - struct mvpp2_txq_pcpu *txq_pcpu) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct tso_t tso; - int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); - int i, len, descs = 0; - - /* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, - tso_count_descs(skb)) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, - tso_count_descs(skb))) - return 0; - - tso_start(skb, &tso); - len = skb->len - hdr_sz; - while (len > 0) { - int left = min_t(int, skb_shinfo(skb)->gso_size, len); - char *hdr = txq_pcpu->tso_headers + - txq_pcpu->txq_put_index * TSO_HEADER_SIZE; - - len -= left; - descs++; - - tso_build_hdr(skb, hdr, &tso, left, len == 0); - mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); - - while (left > 0) { - int sz = min_t(int, tso.size, left); - left -= sz; - descs++; - - if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, - txq_pcpu, sz, left, len == 0)) - goto release; - tso_build_data(skb, &tso, sz); - } - } - - return descs; - -release: - for (i = descs - 1; i >= 0; i--) { - struct mvpp2_tx_desc *tx_desc = txq->descs + i; - tx_desc_unmap_put(port, txq, tx_desc); - } - return 0; -} - -/* Main tx processing */ -static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_tx_queue *txq, *aggr_txq; - struct mvpp2_txq_pcpu *txq_pcpu; - struct mvpp2_tx_desc *tx_desc; - dma_addr_t buf_dma_addr; - int frags = 0; - u16 txq_id; - u32 tx_cmd; - - txq_id = skb_get_queue_mapping(skb); - txq = port->txqs[txq_id]; - txq_pcpu = this_cpu_ptr(txq->pcpu); - aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; - - if (skb_is_gso(skb)) { - frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); - goto out; - } - frags = skb_shinfo(skb)->nr_frags + 1; - - /* Check number of available descriptors */ - if 
(mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, - txq_pcpu, frags)) { - frags = 0; - goto out; - } - - /* Get a descriptor for the first part of the packet */ - tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); - - buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { - mvpp2_txq_desc_put(txq); - frags = 0; - goto out; - } - - mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); - - tx_cmd = mvpp2_skb_tx_csum(port, skb); - - if (frags == 1) { - /* First and Last descriptor */ - tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; - mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); - } else { - /* First but not Last */ - tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; - mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); - - /* Continue with other skb fragments */ - if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { - tx_desc_unmap_put(port, txq, tx_desc); - frags = 0; - } - } - -out: - if (frags > 0) { - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); - struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); - - txq_pcpu->reserved_num -= frags; - txq_pcpu->count += frags; - aggr_txq->count += frags; - - /* Enable transmit */ - wmb(); - mvpp2_aggr_txq_pend_desc_add(port, frags); - - if (txq_pcpu->count >= txq_pcpu->stop_threshold) - netif_tx_stop_queue(nq); - - u64_stats_update_begin(&stats->syncp); - stats->tx_packets++; - stats->tx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); - } else { - dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - } - - /* Finalize TX processing */ - if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) - mvpp2_txq_done(port, txq, txq_pcpu); - - /* Set the timer in case not all frags were processed */ - if (!port->has_tx_irqs && txq_pcpu->count <= frags && - txq_pcpu->count > 0) { - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); - - mvpp2_timer_set(port_pcpu); - } - - return NETDEV_TX_OK; -} - -static inline void mvpp2_cause_error(struct net_device *dev, int cause) -{ - if (cause & MVPP2_CAUSE_FCS_ERR_MASK) - netdev_err(dev, "FCS error\n"); - if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) - netdev_err(dev, "rx fifo overrun error\n"); - if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) - netdev_err(dev, "tx fifo underrun error\n"); -} - -static int mvpp2_poll(struct napi_struct *napi, int budget) -{ - u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; - int rx_done = 0; - struct mvpp2_port *port = netdev_priv(napi->dev); - struct mvpp2_queue_vector *qv; - int cpu = smp_processor_id(); - - qv = container_of(napi, struct mvpp2_queue_vector, napi); - - /* Rx/Tx cause register - * - * Bits 0-15: each bit indicates received packets on the Rx queue - * (bit 0 is for Rx queue 0). - * - * Bits 16-23: each bit indicates transmitted packets on the Tx queue - * (bit 16 is for Tx queue 0). 
- * - * Each CPU has its own Rx/Tx cause register - */ - cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); - - cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; - if (cause_misc) { - mvpp2_cause_error(port->dev, cause_misc); - - /* Clear the cause register */ - mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); - mvpp2_percpu_write(port->priv, cpu, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id), - cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); - } - - cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - if (cause_tx) { - cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; - mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); - } - - /* Process RX packets */ - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; - cause_rx <<= qv->first_rxq; - cause_rx |= qv->pending_cause_rx; - while (cause_rx && budget > 0) { - int count; - struct mvpp2_rx_queue *rxq; - - rxq = mvpp2_get_rx_queue(port, cause_rx); - if (!rxq) - break; - - count = mvpp2_rx(port, napi, budget, rxq); - rx_done += count; - budget -= count; - if (budget > 0) { - /* Clear the bit associated to this Rx queue - * so that next iteration will continue from - * the next Rx queue. - */ - cause_rx &= ~(1 << rxq->logic_rxq); - } - } - - if (budget > 0) { - cause_rx = 0; - napi_complete_done(napi, rx_done); - - mvpp2_qvec_interrupt_enable(qv); - } - qv->pending_cause_rx = cause_rx; - return rx_done; -} - -static void mvpp22_mode_reconfigure(struct mvpp2_port *port) -{ - u32 ctrl3; - - /* comphy reconfiguration */ - mvpp22_comphy_init(port); - - /* gop reconfiguration */ - mvpp22_gop_init(port); - - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0) { - ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); - ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; - - if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR) - ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; - else - ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; - - writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); - } - - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) - mvpp2_xlg_max_rx_size_set(port); - else - mvpp2_gmac_max_rx_size_set(port); -} - -/* Set hw internals when starting port */ -static void mvpp2_start_dev(struct mvpp2_port *port) -{ - int i; - - mvpp2_txp_max_tx_size_set(port); - - for (i = 0; i < port->nqvecs; i++) - napi_enable(&port->qvecs[i].napi); - - /* Enable interrupts on all CPUs */ - mvpp2_interrupts_enable(port); - - if (port->priv->hw_version == MVPP22) - mvpp22_mode_reconfigure(port); - - if (port->phylink) { - phylink_start(port->phylink); - } else { - /* Phylink isn't used as of now for ACPI, so the MAC has to be - * configured manually when the interface is started. This will - * be removed as soon as the phylink ACPI support lands in. 
- */ - struct phylink_link_state state = { - .interface = port->phy_interface, - .link = 1, - }; - mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); - } - - netif_tx_start_all_queues(port->dev); -} - -/* Set hw internals when stopping port */ -static void mvpp2_stop_dev(struct mvpp2_port *port) -{ - int i; - - /* Disable interrupts on all CPUs */ - mvpp2_interrupts_disable(port); - - for (i = 0; i < port->nqvecs; i++) - napi_disable(&port->qvecs[i].napi); - - if (port->phylink) - phylink_stop(port->phylink); - phy_power_off(port->comphy); -} - -static int mvpp2_check_ringparam_valid(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - u16 new_rx_pending = ring->rx_pending; - u16 new_tx_pending = ring->tx_pending; - - if (ring->rx_pending == 0 || ring->tx_pending == 0) - return -EINVAL; - - if (ring->rx_pending > MVPP2_MAX_RXD_MAX) - new_rx_pending = MVPP2_MAX_RXD_MAX; - else if (!IS_ALIGNED(ring->rx_pending, 16)) - new_rx_pending = ALIGN(ring->rx_pending, 16); - - if (ring->tx_pending > MVPP2_MAX_TXD_MAX) - new_tx_pending = MVPP2_MAX_TXD_MAX; - else if (!IS_ALIGNED(ring->tx_pending, 32)) - new_tx_pending = ALIGN(ring->tx_pending, 32); - - /* The Tx ring size cannot be smaller than the minimum number of - * descriptors needed for TSO. - */ - if (new_tx_pending < MVPP2_MAX_SKB_DESCS) - new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); - - if (ring->rx_pending != new_rx_pending) { - netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", - ring->rx_pending, new_rx_pending); - ring->rx_pending = new_rx_pending; - } - - if (ring->tx_pending != new_tx_pending) { - netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", - ring->tx_pending, new_tx_pending); - ring->tx_pending = new_tx_pending; - } - - return 0; -} - -static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) -{ - u32 mac_addr_l, mac_addr_m, mac_addr_h; - - mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); - mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); - mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); - addr[0] = (mac_addr_h >> 24) & 0xFF; - addr[1] = (mac_addr_h >> 16) & 0xFF; - addr[2] = (mac_addr_h >> 8) & 0xFF; - addr[3] = mac_addr_h & 0xFF; - addr[4] = mac_addr_m & 0xFF; - addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; -} - -static int mvpp2_irqs_init(struct mvpp2_port *port) -{ - int err, i; - - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *qv = port->qvecs + i; - - if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) - irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); - - err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); - if (err) - goto err; - - if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) - irq_set_affinity_hint(qv->irq, - cpumask_of(qv->sw_thread_id)); - } - - return 0; -err: - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *qv = port->qvecs + i; - - irq_set_affinity_hint(qv->irq, NULL); - free_irq(qv->irq, qv); - } - - return err; -} - -static void mvpp2_irqs_deinit(struct mvpp2_port *port) -{ - int i; - - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *qv = port->qvecs + i; - - irq_set_affinity_hint(qv->irq, NULL); - irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); - free_irq(qv->irq, qv); - } -} - -static void mvpp22_init_rss(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - int i; - - /* Set the table width: replace the whole classifier Rx queue number - * with the ones configured in RSS table entries. 
- */ - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); - mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); - - /* Loop through the classifier Rx Queues and map them to a RSS table. - * Map them all to the first table (0) by default. - */ - for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); - mvpp2_write(priv, MVPP22_RSS_TABLE, - MVPP22_RSS_TABLE_POINTER(0)); - } - - /* Configure the first table to evenly distribute the packets across - * real Rx Queues. The table entries map a hash to an port Rx Queue. - */ - for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { - u32 sel = MVPP22_RSS_INDEX_TABLE(0) | - MVPP22_RSS_INDEX_TABLE_ENTRY(i); - mvpp2_write(priv, MVPP22_RSS_INDEX, sel); - - mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); - } - -} - -static int mvpp2_open(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2 *priv = port->priv; - unsigned char mac_bcast[ETH_ALEN] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - bool valid = false; - int err; - - err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); - if (err) { - netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); - return err; - } - err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); - if (err) { - netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); - return err; - } - err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); - if (err) { - netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); - return err; - } - err = mvpp2_prs_def_flow(port); - if (err) { - netdev_err(dev, "mvpp2_prs_def_flow failed\n"); - return err; - } - - /* Allocate the Rx/Tx queues */ - err = mvpp2_setup_rxqs(port); - if (err) { - netdev_err(port->dev, "cannot allocate Rx queues\n"); - return err; - } - - err = mvpp2_setup_txqs(port); - if (err) { - netdev_err(port->dev, "cannot allocate Tx queues\n"); - goto err_cleanup_rxqs; - } - - err = mvpp2_irqs_init(port); - if (err) { - netdev_err(port->dev, "cannot init IRQs\n"); - goto err_cleanup_txqs; - } - - /* Phylink isn't supported yet in ACPI mode */ - if (port->of_node) { - err = phylink_of_phy_connect(port->phylink, port->of_node, 0); - if (err) { - netdev_err(port->dev, "could not attach PHY (%d)\n", - err); - goto err_free_irq; - } - - valid = true; - } - - if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) { - err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, - dev->name, port); - if (err) { - netdev_err(port->dev, "cannot request link IRQ %d\n", - port->link_irq); - goto err_free_irq; - } - - mvpp22_gop_setup_irq(port); - - /* In default link is down */ - netif_carrier_off(port->dev); - - valid = true; - } else { - port->link_irq = 0; - } - - if (!valid) { - netdev_err(port->dev, - "invalid configuration: no dt or link IRQ"); - goto err_free_irq; - } - - /* Unmask interrupts on all CPUs */ - on_each_cpu(mvpp2_interrupts_unmask, port, 1); - mvpp2_shared_interrupt_mask_unmask(port, false); - - mvpp2_start_dev(port); - - if (priv->hw_version == MVPP22) - mvpp22_init_rss(port); - - /* Start hardware statistics gathering */ - queue_delayed_work(priv->stats_queue, &port->stats_work, - MVPP2_MIB_COUNTERS_STATS_DELAY); - - return 0; - -err_free_irq: - mvpp2_irqs_deinit(port); -err_cleanup_txqs: - mvpp2_cleanup_txqs(port); -err_cleanup_rxqs: - mvpp2_cleanup_rxqs(port); - return err; -} - -static int mvpp2_stop(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_port_pcpu *port_pcpu; - int cpu; - - 
mvpp2_stop_dev(port); - - /* Mask interrupts on all CPUs */ - on_each_cpu(mvpp2_interrupts_mask, port, 1); - mvpp2_shared_interrupt_mask_unmask(port, true); - - if (port->phylink) - phylink_disconnect_phy(port->phylink); - if (port->link_irq) - free_irq(port->link_irq, port); - - mvpp2_irqs_deinit(port); - if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); - - hrtimer_cancel(&port_pcpu->tx_done_timer); - port_pcpu->timer_scheduled = false; - tasklet_kill(&port_pcpu->tx_done_tasklet); - } - } - mvpp2_cleanup_rxqs(port); - mvpp2_cleanup_txqs(port); - - cancel_delayed_work_sync(&port->stats_work); - - return 0; -} - -static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, - struct netdev_hw_addr_list *list) -{ - struct netdev_hw_addr *ha; - int ret; - - netdev_hw_addr_list_for_each(ha, list) { - ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); - if (ret) - return ret; - } - - return 0; -} - -static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) -{ - if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) - mvpp2_prs_vid_enable_filtering(port); - else - mvpp2_prs_vid_disable_filtering(port); - - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_UNI_CAST, enable); - - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_MULTI_CAST, enable); -} - -static void mvpp2_set_rx_mode(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - - /* Clear the whole UC and MC list */ - mvpp2_prs_mac_del_all(port); - - if (dev->flags & IFF_PROMISC) { - mvpp2_set_rx_promisc(port, true); - return; - } - - mvpp2_set_rx_promisc(port, false); - - if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || - mvpp2_prs_mac_da_accept_list(port, &dev->uc)) - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_UNI_CAST, true); - - if (dev->flags & IFF_ALLMULTI) { - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_MULTI_CAST, true); - return; - } - - if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || - mvpp2_prs_mac_da_accept_list(port, &dev->mc)) - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_MULTI_CAST, true); -} - -static int mvpp2_set_mac_address(struct net_device *dev, void *p) -{ - const struct sockaddr *addr = p; - int err; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - err = mvpp2_prs_update_mac_da(dev, addr->sa_data); - if (err) { - /* Reconfigure parser accept the original MAC address */ - mvpp2_prs_update_mac_da(dev, dev->dev_addr); - netdev_err(dev, "failed to change MAC address\n"); - } - return err; -} - -static int mvpp2_change_mtu(struct net_device *dev, int mtu) -{ - struct mvpp2_port *port = netdev_priv(dev); - int err; - - if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { - netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, - ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); - mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); - } - - if (!netif_running(dev)) { - err = mvpp2_bm_update_mtu(dev, mtu); - if (!err) { - port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); - return 0; - } - - /* Reconfigure BM to the original MTU */ - err = mvpp2_bm_update_mtu(dev, dev->mtu); - if (err) - goto log_error; - } - - mvpp2_stop_dev(port); - - err = mvpp2_bm_update_mtu(dev, mtu); - if (!err) { - port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); - goto out_start; - } - - /* Reconfigure BM to the original MTU */ - err = mvpp2_bm_update_mtu(dev, dev->mtu); - if (err) - goto log_error; - -out_start: - mvpp2_start_dev(port); - mvpp2_egress_enable(port); - 
mvpp2_ingress_enable(port); - - return 0; -log_error: - netdev_err(dev, "failed to change MTU\n"); - return err; -} - -static void -mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) -{ - struct mvpp2_port *port = netdev_priv(dev); - unsigned int start; - int cpu; - - for_each_possible_cpu(cpu) { - struct mvpp2_pcpu_stats *cpu_stats; - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; - - cpu_stats = per_cpu_ptr(port->stats, cpu); - do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); - rx_packets = cpu_stats->rx_packets; - rx_bytes = cpu_stats->rx_bytes; - tx_packets = cpu_stats->tx_packets; - tx_bytes = cpu_stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); - - stats->rx_packets += rx_packets; - stats->rx_bytes += rx_bytes; - stats->tx_packets += tx_packets; - stats->tx_bytes += tx_bytes; - } - - stats->rx_errors = dev->stats.rx_errors; - stats->rx_dropped = dev->stats.rx_dropped; - stats->tx_dropped = dev->stats.tx_dropped; -} - -static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phylink) - return -ENOTSUPP; - - return phylink_mii_ioctl(port->phylink, ifr, cmd); -} - -static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) -{ - struct mvpp2_port *port = netdev_priv(dev); - int ret; - - ret = mvpp2_prs_vid_entry_add(port, vid); - if (ret) - netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", - MVPP2_PRS_VLAN_FILT_MAX - 1); - return ret; -} - -static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) -{ - struct mvpp2_port *port = netdev_priv(dev); - - mvpp2_prs_vid_entry_remove(port, vid); - return 0; -} - -static int mvpp2_set_features(struct net_device *dev, - netdev_features_t features) -{ - netdev_features_t changed = dev->features ^ features; - struct mvpp2_port *port = netdev_priv(dev); - - if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { - mvpp2_prs_vid_enable_filtering(port); - } else { - /* Invalidate all registered VID filters for this - * port - */ - mvpp2_prs_vid_remove_all(port); - - mvpp2_prs_vid_disable_filtering(port); - } - } - - return 0; -} - -/* Ethtool methods */ - -static int mvpp2_ethtool_nway_reset(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phylink) - return -ENOTSUPP; - - return phylink_ethtool_nway_reset(port->phylink); -} - -/* Set interrupt coalescing for ethtools */ -static int mvpp2_ethtool_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) -{ - struct mvpp2_port *port = netdev_priv(dev); - int queue; - - for (queue = 0; queue < port->nrxqs; queue++) { - struct mvpp2_rx_queue *rxq = port->rxqs[queue]; - - rxq->time_coal = c->rx_coalesce_usecs; - rxq->pkts_coal = c->rx_max_coalesced_frames; - mvpp2_rx_pkts_coal_set(port, rxq); - mvpp2_rx_time_coal_set(port, rxq); - } - - if (port->has_tx_irqs) { - port->tx_time_coal = c->tx_coalesce_usecs; - mvpp2_tx_time_coal_set(port); - } - - for (queue = 0; queue < port->ntxqs; queue++) { - struct mvpp2_tx_queue *txq = port->txqs[queue]; - - txq->done_pkts_coal = c->tx_max_coalesced_frames; - - if (port->has_tx_irqs) - mvpp2_tx_pkts_coal_set(port, txq); - } - - return 0; -} - -/* get coalescing for ethtools */ -static int mvpp2_ethtool_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) -{ - struct mvpp2_port *port = netdev_priv(dev); - - c->rx_coalesce_usecs 
= port->rxqs[0]->time_coal; - c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; - c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; - c->tx_coalesce_usecs = port->tx_time_coal; - return 0; -} - -static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, - sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, - sizeof(drvinfo->version)); - strlcpy(drvinfo->bus_info, dev_name(&dev->dev), - sizeof(drvinfo->bus_info)); -} - -static void mvpp2_ethtool_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct mvpp2_port *port = netdev_priv(dev); - - ring->rx_max_pending = MVPP2_MAX_RXD_MAX; - ring->tx_max_pending = MVPP2_MAX_TXD_MAX; - ring->rx_pending = port->rx_ring_size; - ring->tx_pending = port->tx_ring_size; -} - -static int mvpp2_ethtool_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct mvpp2_port *port = netdev_priv(dev); - u16 prev_rx_ring_size = port->rx_ring_size; - u16 prev_tx_ring_size = port->tx_ring_size; - int err; - - err = mvpp2_check_ringparam_valid(dev, ring); - if (err) - return err; - - if (!netif_running(dev)) { - port->rx_ring_size = ring->rx_pending; - port->tx_ring_size = ring->tx_pending; - return 0; - } - - /* The interface is running, so we have to force a - * reallocation of the queues - */ - mvpp2_stop_dev(port); - mvpp2_cleanup_rxqs(port); - mvpp2_cleanup_txqs(port); - - port->rx_ring_size = ring->rx_pending; - port->tx_ring_size = ring->tx_pending; - - err = mvpp2_setup_rxqs(port); - if (err) { - /* Reallocate Rx queues with the original ring size */ - port->rx_ring_size = prev_rx_ring_size; - ring->rx_pending = prev_rx_ring_size; - err = mvpp2_setup_rxqs(port); - if (err) - goto err_out; - } - err = mvpp2_setup_txqs(port); - if (err) { - /* Reallocate Tx queues with the original ring size */ - port->tx_ring_size = prev_tx_ring_size; - ring->tx_pending = prev_tx_ring_size; - err = mvpp2_setup_txqs(port); - if (err) - goto err_clean_rxqs; - } - - mvpp2_start_dev(port); - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - - return 0; - -err_clean_rxqs: - mvpp2_cleanup_rxqs(port); -err_out: - netdev_err(dev, "failed to change ring parameters"); - return err; -} - -static void mvpp2_ethtool_get_pause_param(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phylink) - return; - - phylink_ethtool_get_pauseparam(port->phylink, pause); -} - -static int mvpp2_ethtool_set_pause_param(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phylink) - return -ENOTSUPP; - - return phylink_ethtool_set_pauseparam(port->phylink, pause); -} - -static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phylink) - return -ENOTSUPP; - - return phylink_ethtool_ksettings_get(port->phylink, cmd); -} - -static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phylink) - return -ENOTSUPP; - - return phylink_ethtool_ksettings_set(port->phylink, cmd); -} - -/* Device ops */ - -static const struct net_device_ops mvpp2_netdev_ops = { - .ndo_open = mvpp2_open, - .ndo_stop = mvpp2_stop, - .ndo_start_xmit = mvpp2_tx, - .ndo_set_rx_mode = 
mvpp2_set_rx_mode, - .ndo_set_mac_address = mvpp2_set_mac_address, - .ndo_change_mtu = mvpp2_change_mtu, - .ndo_get_stats64 = mvpp2_get_stats64, - .ndo_do_ioctl = mvpp2_ioctl, - .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, - .ndo_set_features = mvpp2_set_features, -}; - -static const struct ethtool_ops mvpp2_eth_tool_ops = { - .nway_reset = mvpp2_ethtool_nway_reset, - .get_link = ethtool_op_get_link, - .set_coalesce = mvpp2_ethtool_set_coalesce, - .get_coalesce = mvpp2_ethtool_get_coalesce, - .get_drvinfo = mvpp2_ethtool_get_drvinfo, - .get_ringparam = mvpp2_ethtool_get_ringparam, - .set_ringparam = mvpp2_ethtool_set_ringparam, - .get_strings = mvpp2_ethtool_get_strings, - .get_ethtool_stats = mvpp2_ethtool_get_stats, - .get_sset_count = mvpp2_ethtool_get_sset_count, - .get_pauseparam = mvpp2_ethtool_get_pause_param, - .set_pauseparam = mvpp2_ethtool_set_pause_param, - .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, - .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, -}; - -/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that - * had a single IRQ defined per-port. - */ -static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, - struct device_node *port_node) -{ - struct mvpp2_queue_vector *v = &port->qvecs[0]; - - v->first_rxq = 0; - v->nrxqs = port->nrxqs; - v->type = MVPP2_QUEUE_VECTOR_SHARED; - v->sw_thread_id = 0; - v->sw_thread_mask = *cpumask_bits(cpu_online_mask); - v->port = port; - v->irq = irq_of_parse_and_map(port_node, 0); - if (v->irq <= 0) - return -EINVAL; - netif_napi_add(port->dev, &v->napi, mvpp2_poll, - NAPI_POLL_WEIGHT); - - port->nqvecs = 1; - - return 0; -} - -static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, - struct device_node *port_node) -{ - struct mvpp2_queue_vector *v; - int i, ret; - - port->nqvecs = num_possible_cpus(); - if (queue_mode == MVPP2_QDIST_SINGLE_MODE) - port->nqvecs += 1; - - for (i = 0; i < port->nqvecs; i++) { - char irqname[16]; - - v = port->qvecs + i; - - v->port = port; - v->type = MVPP2_QUEUE_VECTOR_PRIVATE; - v->sw_thread_id = i; - v->sw_thread_mask = BIT(i); - - snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); - - if (queue_mode == MVPP2_QDIST_MULTI_MODE) { - v->first_rxq = i * MVPP2_DEFAULT_RXQ; - v->nrxqs = MVPP2_DEFAULT_RXQ; - } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && - i == (port->nqvecs - 1)) { - v->first_rxq = 0; - v->nrxqs = port->nrxqs; - v->type = MVPP2_QUEUE_VECTOR_SHARED; - strncpy(irqname, "rx-shared", sizeof(irqname)); - } - - if (port_node) - v->irq = of_irq_get_byname(port_node, irqname); - else - v->irq = fwnode_irq_get(port->fwnode, i); - if (v->irq <= 0) { - ret = -EINVAL; - goto err; - } - - netif_napi_add(port->dev, &v->napi, mvpp2_poll, - NAPI_POLL_WEIGHT); - } - - return 0; - -err: - for (i = 0; i < port->nqvecs; i++) - irq_dispose_mapping(port->qvecs[i].irq); - return ret; -} - -static int mvpp2_queue_vectors_init(struct mvpp2_port *port, - struct device_node *port_node) -{ - if (port->has_tx_irqs) - return mvpp2_multi_queue_vectors_init(port, port_node); - else - return mvpp2_simple_queue_vectors_init(port, port_node); -} - -static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) -{ - int i; - - for (i = 0; i < port->nqvecs; i++) - irq_dispose_mapping(port->qvecs[i].irq); -} - -/* Configure Rx queue group interrupt for this port */ -static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - u32 val; - int i; - - if (priv->hw_version == MVPP21) { - 
mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), - port->nrxqs); - return; - } - - /* Handle the more complicated PPv2.2 case */ - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *qv = port->qvecs + i; - - if (!qv->nrxqs) - continue; - - val = qv->sw_thread_id; - val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; - mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); - - val = qv->first_rxq; - val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; - mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); - } -} - -/* Initialize port HW */ -static int mvpp2_port_init(struct mvpp2_port *port) -{ - struct device *dev = port->dev->dev.parent; - struct mvpp2 *priv = port->priv; - struct mvpp2_txq_pcpu *txq_pcpu; - int queue, cpu, err; - - /* Checks for hardware constraints */ - if (port->first_rxq + port->nrxqs > - MVPP2_MAX_PORTS * priv->max_port_rxqs) - return -EINVAL; - - if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || - (port->ntxqs > MVPP2_MAX_TXQ)) - return -EINVAL; - - /* Disable port */ - mvpp2_egress_disable(port); - mvpp2_port_disable(port); - - port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; - - port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), - GFP_KERNEL); - if (!port->txqs) - return -ENOMEM; - - /* Associate physical Tx queues to this port and initialize. - * The mapping is predefined. - */ - for (queue = 0; queue < port->ntxqs; queue++) { - int queue_phy_id = mvpp2_txq_phys(port->id, queue); - struct mvpp2_tx_queue *txq; - - txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); - if (!txq) { - err = -ENOMEM; - goto err_free_percpu; - } - - txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); - if (!txq->pcpu) { - err = -ENOMEM; - goto err_free_percpu; - } - - txq->id = queue_phy_id; - txq->log_id = queue; - txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - txq_pcpu->cpu = cpu; - } - - port->txqs[queue] = txq; - } - - port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), - GFP_KERNEL); - if (!port->rxqs) { - err = -ENOMEM; - goto err_free_percpu; - } - - /* Allocate and initialize Rx queue for this port */ - for (queue = 0; queue < port->nrxqs; queue++) { - struct mvpp2_rx_queue *rxq; - - /* Map physical Rx queue to port's logical Rx queue */ - rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); - if (!rxq) { - err = -ENOMEM; - goto err_free_percpu; - } - /* Map this Rx queue to a physical queue */ - rxq->id = port->first_rxq + queue; - rxq->port = port->id; - rxq->logic_rxq = queue; - - port->rxqs[queue] = rxq; - } - - mvpp2_rx_irqs_setup(port); - - /* Create Rx descriptor rings */ - for (queue = 0; queue < port->nrxqs; queue++) { - struct mvpp2_rx_queue *rxq = port->rxqs[queue]; - - rxq->size = port->rx_ring_size; - rxq->pkts_coal = MVPP2_RX_COAL_PKTS; - rxq->time_coal = MVPP2_RX_COAL_USEC; - } - - mvpp2_ingress_disable(port); - - /* Port default configuration */ - mvpp2_defaults_set(port); - - /* Port's classifier configuration */ - mvpp2_cls_oversize_rxq_set(port); - mvpp2_cls_port_config(port); - - /* Provide an initial Rx packet size */ - port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); - - /* Initialize pools for swf */ - err = mvpp2_swf_bm_pool_init(port); - if (err) - goto err_free_percpu; - - return 0; - -err_free_percpu: - for (queue = 0; queue < port->ntxqs; queue++) { - if (!port->txqs[queue]) - continue; - free_percpu(port->txqs[queue]->pcpu); - } - return err; -} - -/* Checks if the port DT description has the TX 
interrupts - * described. On PPv2.1, there are no such interrupts. On PPv2.2, - * there are available, but we need to keep support for old DTs. - */ -static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, - struct device_node *port_node) -{ - char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", - "tx-cpu2", "tx-cpu3" }; - int ret, i; - - if (priv->hw_version == MVPP21) - return false; - - for (i = 0; i < 5; i++) { - ret = of_property_match_string(port_node, "interrupt-names", - irqs[i]); - if (ret < 0) - return false; - } - - return true; -} - -static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, - struct fwnode_handle *fwnode, - char **mac_from) -{ - struct mvpp2_port *port = netdev_priv(dev); - char hw_mac_addr[ETH_ALEN] = {0}; - char fw_mac_addr[ETH_ALEN]; - - if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { - *mac_from = "firmware node"; - ether_addr_copy(dev->dev_addr, fw_mac_addr); - return; - } - - if (priv->hw_version == MVPP21) { - mvpp21_get_mac_address(port, hw_mac_addr); - if (is_valid_ether_addr(hw_mac_addr)) { - *mac_from = "hardware"; - ether_addr_copy(dev->dev_addr, hw_mac_addr); - return; - } - } - - *mac_from = "random"; - eth_hw_addr_random(dev); -} - -static void mvpp2_phylink_validate(struct net_device *dev, - unsigned long *supported, - struct phylink_link_state *state) -{ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - - switch (state->interface) { - case PHY_INTERFACE_MODE_10GKR: - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseER_Full); - phylink_set(mask, 10000baseKR_Full); - /* Fall-through */ - default: - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 10000baseT_Full); - /* Fall-through */ - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - phylink_set(mask, 2500baseX_Full); - } - - bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); -} - -static void mvpp22_xlg_link_state(struct mvpp2_port *port, - struct phylink_link_state *state) -{ - u32 val; - - state->speed = SPEED_10000; - state->duplex = 1; - state->an_complete = 1; - - val = readl(port->base + MVPP22_XLG_STATUS); - state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); - - state->pause = 0; - val = readl(port->base + MVPP22_XLG_CTRL0_REG); - if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) - state->pause |= MLO_PAUSE_TX; - if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) - state->pause |= MLO_PAUSE_RX; -} - -static void mvpp2_gmac_link_state(struct mvpp2_port *port, - struct phylink_link_state *state) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_STATUS0); - - state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); - state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); - state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); - - switch (port->phy_interface) { - case PHY_INTERFACE_MODE_1000BASEX: - state->speed = SPEED_1000; - break; - case PHY_INTERFACE_MODE_2500BASEX: - state->speed = SPEED_2500; - break; - default: - if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) - state->speed = 
SPEED_1000; - else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) - state->speed = SPEED_100; - else - state->speed = SPEED_10; - } - - state->pause = 0; - if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) - state->pause |= MLO_PAUSE_RX; - if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) - state->pause |= MLO_PAUSE_TX; -} - -static int mvpp2_phylink_mac_link_state(struct net_device *dev, - struct phylink_link_state *state) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { - u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); - mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK; - - if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) { - mvpp22_xlg_link_state(port, state); - return 1; - } - } - - mvpp2_gmac_link_state(port, state); - return 1; -} - -static void mvpp2_mac_an_restart(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - u32 val; - - if (port->phy_interface != PHY_INTERFACE_MODE_SGMII) - return; - - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - /* The RESTART_AN bit is cleared by the h/w after restarting the AN - * process. - */ - val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - -static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, - const struct phylink_link_state *state) -{ - u32 ctrl0, ctrl4; - - ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); - ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); - - if (state->pause & MLO_PAUSE_TX) - ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; - if (state->pause & MLO_PAUSE_RX) - ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; - - ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; - ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC | - MVPP22_XLG_CTRL4_EN_IDLE_CHECK; - - writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); - writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); -} - -static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, - const struct phylink_link_state *state) -{ - u32 an, ctrl0, ctrl2, ctrl4; - - an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); - - /* Force link down */ - an &= ~MVPP2_GMAC_FORCE_LINK_PASS; - an |= MVPP2_GMAC_FORCE_LINK_DOWN; - writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - - /* Set the GMAC in a reset state */ - ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; - writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); - - an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | - MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | - MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN | - MVPP2_GMAC_FORCE_LINK_DOWN); - ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; - ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); - - if (state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == PHY_INTERFACE_MODE_2500BASEX) { - /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can - * they negotiate duplex: they are always operating with a fixed - * speed of 1000/2500Mbps in full duplex, so force 1000/2500 - * speed and full duplex here. 
- */ - ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; - an |= MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX; - } else if (!phy_interface_mode_is_rgmii(state->interface)) { - an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG; - } - - if (state->duplex) - an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; - if (phylink_test(state->advertising, Pause)) - an |= MVPP2_GMAC_FC_ADV_EN; - if (phylink_test(state->advertising, Asym_Pause)) - an |= MVPP2_GMAC_FC_ADV_ASM_EN; - - if (state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == PHY_INTERFACE_MODE_2500BASEX) { - an |= MVPP2_GMAC_IN_BAND_AUTONEG; - ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; - - ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL | - MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); - ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | - MVPP22_CTRL4_DP_CLK_SEL | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; - - if (state->pause & MLO_PAUSE_TX) - ctrl4 |= MVPP22_CTRL4_TX_FC_EN; - if (state->pause & MLO_PAUSE_RX) - ctrl4 |= MVPP22_CTRL4_RX_FC_EN; - } else if (phy_interface_mode_is_rgmii(state->interface)) { - an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS; - - if (state->speed == SPEED_1000) - an |= MVPP2_GMAC_CONFIG_GMII_SPEED; - else if (state->speed == SPEED_100) - an |= MVPP2_GMAC_CONFIG_MII_SPEED; - - ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; - ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | - MVPP22_CTRL4_SYNC_BYPASS_DIS | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; - } - - writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); - writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); - writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); - writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - -static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, - const struct phylink_link_state *state) -{ - struct mvpp2_port *port = netdev_priv(dev); - - /* Check for invalid configuration */ - if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) { - netdev_err(dev, "Invalid mode on %s\n", dev->name); - return; - } - - netif_tx_stop_all_queues(port->dev); - if (!port->has_phy) - netif_carrier_off(port->dev); - - /* Make sure the port is disabled when reconfiguring the mode */ - mvpp2_port_disable(port); - - if (port->priv->hw_version == MVPP22 && - port->phy_interface != state->interface) { - port->phy_interface = state->interface; - - /* Reconfigure the serdes lanes */ - phy_power_off(port->comphy); - mvpp22_mode_reconfigure(port); - } - - /* mac (re)configuration */ - if (state->interface == PHY_INTERFACE_MODE_10GKR) - mvpp2_xlg_config(port, mode, state); - else if (phy_interface_mode_is_rgmii(state->interface) || - state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == PHY_INTERFACE_MODE_2500BASEX) - mvpp2_gmac_config(port, mode, state); - - if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) - mvpp2_port_loopback_set(port, state); - - /* If the port already was up, make sure it's still in the same state */ - if (state->link || !port->has_phy) { - mvpp2_port_enable(port); - - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - if (!port->has_phy) - netif_carrier_on(dev); - netif_tx_wake_all_queues(dev); - } -} - -static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, - phy_interface_t interface, struct phy_device *phy) -{ - struct mvpp2_port *port = netdev_priv(dev); - u32 val; - - if (!phylink_autoneg_inband(mode) && - interface != PHY_INTERFACE_MODE_10GKR) { - 
val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; - if (phy_interface_mode_is_rgmii(interface)) - val |= MVPP2_GMAC_FORCE_LINK_PASS; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - } - - mvpp2_port_enable(port); - - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - netif_tx_wake_all_queues(dev); -} - -static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode, - phy_interface_t interface) -{ - struct mvpp2_port *port = netdev_priv(dev); - u32 val; - - if (!phylink_autoneg_inband(mode) && - interface != PHY_INTERFACE_MODE_10GKR) { - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~MVPP2_GMAC_FORCE_LINK_PASS; - val |= MVPP2_GMAC_FORCE_LINK_DOWN; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - } - - netif_tx_stop_all_queues(dev); - mvpp2_egress_disable(port); - mvpp2_ingress_disable(port); - - /* When using link interrupts to notify phylink of a MAC state change, - * we do not want the port to be disabled (we want to receive further - * interrupts, to be notified when the port will have a link later). - */ - if (!port->has_phy) - return; - - mvpp2_port_disable(port); -} - -static const struct phylink_mac_ops mvpp2_phylink_ops = { - .validate = mvpp2_phylink_validate, - .mac_link_state = mvpp2_phylink_mac_link_state, - .mac_an_restart = mvpp2_mac_an_restart, - .mac_config = mvpp2_mac_config, - .mac_link_up = mvpp2_mac_link_up, - .mac_link_down = mvpp2_mac_link_down, -}; - -/* Ports initialization */ -static int mvpp2_port_probe(struct platform_device *pdev, - struct fwnode_handle *port_fwnode, - struct mvpp2 *priv) -{ - struct phy *comphy = NULL; - struct mvpp2_port *port; - struct mvpp2_port_pcpu *port_pcpu; - struct device_node *port_node = to_of_node(port_fwnode); - struct net_device *dev; - struct resource *res; - struct phylink *phylink; - char *mac_from = ""; - unsigned int ntxqs, nrxqs; - bool has_tx_irqs; - u32 id; - int features; - int phy_mode; - int err, i, cpu; - - if (port_node) { - has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); - } else { - has_tx_irqs = true; - queue_mode = MVPP2_QDIST_MULTI_MODE; - } - - if (!has_tx_irqs) - queue_mode = MVPP2_QDIST_SINGLE_MODE; - - ntxqs = MVPP2_MAX_TXQ; - if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) - nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); - else - nrxqs = MVPP2_DEFAULT_RXQ; - - dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); - if (!dev) - return -ENOMEM; - - phy_mode = fwnode_get_phy_mode(port_fwnode); - if (phy_mode < 0) { - dev_err(&pdev->dev, "incorrect phy mode\n"); - err = phy_mode; - goto err_free_netdev; - } - - if (port_node) { - comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); - if (IS_ERR(comphy)) { - if (PTR_ERR(comphy) == -EPROBE_DEFER) { - err = -EPROBE_DEFER; - goto err_free_netdev; - } - comphy = NULL; - } - } - - if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { - err = -EINVAL; - dev_err(&pdev->dev, "missing port-id value\n"); - goto err_free_netdev; - } - - dev->tx_queue_len = MVPP2_MAX_TXD_MAX; - dev->watchdog_timeo = 5 * HZ; - dev->netdev_ops = &mvpp2_netdev_ops; - dev->ethtool_ops = &mvpp2_eth_tool_ops; - - port = netdev_priv(dev); - port->dev = dev; - port->fwnode = port_fwnode; - port->has_phy = !!of_find_property(port_node, "phy", NULL); - port->ntxqs = ntxqs; - port->nrxqs = nrxqs; - port->priv = priv; - port->has_tx_irqs = has_tx_irqs; - - err = mvpp2_queue_vectors_init(port, port_node); - if (err) - goto err_free_netdev; - - if (port_node) - 
port->link_irq = of_irq_get_byname(port_node, "link"); - else - port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); - if (port->link_irq == -EPROBE_DEFER) { - err = -EPROBE_DEFER; - goto err_deinit_qvecs; - } - if (port->link_irq <= 0) - /* the link irq is optional */ - port->link_irq = 0; - - if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) - port->flags |= MVPP2_F_LOOPBACK; - - port->id = id; - if (priv->hw_version == MVPP21) - port->first_rxq = port->id * port->nrxqs; - else - port->first_rxq = port->id * priv->max_port_rxqs; - - port->of_node = port_node; - port->phy_interface = phy_mode; - port->comphy = comphy; - - if (priv->hw_version == MVPP21) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id); - port->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(port->base)) { - err = PTR_ERR(port->base); - goto err_free_irq; - } - - port->stats_base = port->priv->lms_base + - MVPP21_MIB_COUNTERS_OFFSET + - port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; - } else { - if (fwnode_property_read_u32(port_fwnode, "gop-port-id", - &port->gop_id)) { - err = -EINVAL; - dev_err(&pdev->dev, "missing gop-port-id value\n"); - goto err_deinit_qvecs; - } - - port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); - port->stats_base = port->priv->iface_base + - MVPP22_MIB_COUNTERS_OFFSET + - port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; - } - - /* Alloc per-cpu and ethtool stats */ - port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); - if (!port->stats) { - err = -ENOMEM; - goto err_free_irq; - } - - port->ethtool_stats = devm_kcalloc(&pdev->dev, - ARRAY_SIZE(mvpp2_ethtool_regs), - sizeof(u64), GFP_KERNEL); - if (!port->ethtool_stats) { - err = -ENOMEM; - goto err_free_stats; - } - - mutex_init(&port->gather_stats_lock); - INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); - - mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); - - port->tx_ring_size = MVPP2_MAX_TXD_DFLT; - port->rx_ring_size = MVPP2_MAX_RXD_DFLT; - SET_NETDEV_DEV(dev, &pdev->dev); - - err = mvpp2_port_init(port); - if (err < 0) { - dev_err(&pdev->dev, "failed to init port %d\n", id); - goto err_free_stats; - } - - mvpp2_port_periodic_xon_disable(port); - - mvpp2_port_reset(port); - - port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); - if (!port->pcpu) { - err = -ENOMEM; - goto err_free_txq_pcpu; - } - - if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); - - hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL_PINNED); - port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; - port_pcpu->timer_scheduled = false; - - tasklet_init(&port_pcpu->tx_done_tasklet, - mvpp2_tx_proc_cb, - (unsigned long)dev); - } - } - - features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO; - dev->features = features | NETIF_F_RXCSUM; - dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | - NETIF_F_HW_VLAN_CTAG_FILTER; - - if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { - dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - } - - dev->vlan_features |= features; - dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; - dev->priv_flags |= IFF_UNICAST_FLT; - - /* MTU range: 68 - 9704 */ - dev->min_mtu = ETH_MIN_MTU; - /* 9704 == 9728 - 20 and rounding to 8 */ - dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; - - /* Phylink isn't used w/ ACPI as of now */ - if (port_node) { - phylink = phylink_create(dev, port_fwnode, 
phy_mode, - &mvpp2_phylink_ops); - if (IS_ERR(phylink)) { - err = PTR_ERR(phylink); - goto err_free_port_pcpu; - } - port->phylink = phylink; - } else { - port->phylink = NULL; - } - - err = register_netdev(dev); - if (err < 0) { - dev_err(&pdev->dev, "failed to register netdev\n"); - goto err_phylink; - } - netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); - - priv->port_list[priv->port_count++] = port; - - return 0; - -err_phylink: - if (port->phylink) - phylink_destroy(port->phylink); -err_free_port_pcpu: - free_percpu(port->pcpu); -err_free_txq_pcpu: - for (i = 0; i < port->ntxqs; i++) - free_percpu(port->txqs[i]->pcpu); -err_free_stats: - free_percpu(port->stats); -err_free_irq: - if (port->link_irq) - irq_dispose_mapping(port->link_irq); -err_deinit_qvecs: - mvpp2_queue_vectors_deinit(port); -err_free_netdev: - free_netdev(dev); - return err; -} - -/* Ports removal routine */ -static void mvpp2_port_remove(struct mvpp2_port *port) -{ - int i; - - unregister_netdev(port->dev); - if (port->phylink) - phylink_destroy(port->phylink); - free_percpu(port->pcpu); - free_percpu(port->stats); - for (i = 0; i < port->ntxqs; i++) - free_percpu(port->txqs[i]->pcpu); - mvpp2_queue_vectors_deinit(port); - if (port->link_irq) - irq_dispose_mapping(port->link_irq); - free_netdev(port->dev); -} - -/* Initialize decoding windows */ -static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, - struct mvpp2 *priv) -{ - u32 win_enable; - int i; - - for (i = 0; i < 6; i++) { - mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); - mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); - - if (i < 4) - mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); - } - - win_enable = 0; - - for (i = 0; i < dram->num_cs; i++) { - const struct mbus_dram_window *cs = dram->cs + i; - - mvpp2_write(priv, MVPP2_WIN_BASE(i), - (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | - dram->mbus_dram_target_id); - - mvpp2_write(priv, MVPP2_WIN_SIZE(i), - (cs->size - 1) & 0xffff0000); - - win_enable |= (1 << i); - } - - mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); -} - -/* Initialize Rx FIFO's */ -static void mvpp2_rx_fifo_init(struct mvpp2 *priv) -{ - int port; - - for (port = 0; port < MVPP2_MAX_PORTS; port++) { - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); - } - - mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, - MVPP2_RX_FIFO_PORT_MIN_PKT); - mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); -} - -static void mvpp22_rx_fifo_init(struct mvpp2 *priv) -{ - int port; - - /* The FIFO size parameters are set depending on the maximum speed a - * given port can handle: - * - Port 0: 10Gbps - * - Port 1: 2.5Gbps - * - Ports 2 and 3: 1Gbps - */ - - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0), - MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB); - - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1), - MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB); - - for (port = 2; port < MVPP2_MAX_PORTS; port++) { - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); - } - - mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, - MVPP2_RX_FIFO_PORT_MIN_PKT); - mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); -} - -/* Initialize Tx 
FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G - * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G, - * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB. - */ -static void mvpp22_tx_fifo_init(struct mvpp2 *priv) -{ - int port, size, thrs; - - for (port = 0; port < MVPP2_MAX_PORTS; port++) { - if (port == 0) { - size = MVPP22_TX_FIFO_DATA_SIZE_10KB; - thrs = MVPP2_TX_FIFO_THRESHOLD_10KB; - } else { - size = MVPP22_TX_FIFO_DATA_SIZE_3KB; - thrs = MVPP2_TX_FIFO_THRESHOLD_3KB; - } - mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); - mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs); - } -} - -static void mvpp2_axi_init(struct mvpp2 *priv) -{ - u32 val, rdval, wrval; - - mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); - - /* AXI Bridge Configuration */ - - rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE - << MVPP22_AXI_ATTR_CACHE_OFFS; - rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM - << MVPP22_AXI_ATTR_DOMAIN_OFFS; - - wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE - << MVPP22_AXI_ATTR_CACHE_OFFS; - wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM - << MVPP22_AXI_ATTR_DOMAIN_OFFS; - - /* BM */ - mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); - mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); - - /* Descriptors */ - mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); - mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); - mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); - mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); - - /* Buffer Data */ - mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); - mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); - - val = MVPP22_AXI_CODE_CACHE_NON_CACHE - << MVPP22_AXI_CODE_CACHE_OFFS; - val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM - << MVPP22_AXI_CODE_DOMAIN_OFFS; - mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); - mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); - - val = MVPP22_AXI_CODE_CACHE_RD_CACHE - << MVPP22_AXI_CODE_CACHE_OFFS; - val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM - << MVPP22_AXI_CODE_DOMAIN_OFFS; - - mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); - - val = MVPP22_AXI_CODE_CACHE_WR_CACHE - << MVPP22_AXI_CODE_CACHE_OFFS; - val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM - << MVPP22_AXI_CODE_DOMAIN_OFFS; - - mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); -} - -/* Initialize network controller common part HW */ -static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) -{ - const struct mbus_dram_target_info *dram_target_info; - int err, i; - u32 val; - - /* MBUS windows configuration */ - dram_target_info = mv_mbus_dram_info(); - if (dram_target_info) - mvpp2_conf_mbus_windows(dram_target_info, priv); - - if (priv->hw_version == MVPP22) - mvpp2_axi_init(priv); - - /* Disable HW PHY polling */ - if (priv->hw_version == MVPP21) { - val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); - val |= MVPP2_PHY_AN_STOP_SMI0_MASK; - writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); - } else { - val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); - val &= ~MVPP22_SMI_POLLING_EN; - writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); - } - - /* Allocate and initialize aggregated TXQs */ - priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), - sizeof(*priv->aggr_txqs), - GFP_KERNEL); - if (!priv->aggr_txqs) - return -ENOMEM; - - for_each_present_cpu(i) { - priv->aggr_txqs[i].id = i; - priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; - err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); - if (err < 
0) - return err; - } - - /* Fifo Init */ - if (priv->hw_version == MVPP21) { - mvpp2_rx_fifo_init(priv); - } else { - mvpp22_rx_fifo_init(priv); - mvpp22_tx_fifo_init(priv); - } - - if (priv->hw_version == MVPP21) - writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, - priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); - - /* Allow cache snoop when transmiting packets */ - mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); - - /* Buffer Manager initialization */ - err = mvpp2_bm_init(pdev, priv); - if (err < 0) - return err; - - /* Parser default initialization */ - err = mvpp2_prs_default_init(pdev, priv); - if (err < 0) - return err; - - /* Classifier default initialization */ - mvpp2_cls_init(priv); - - return 0; -} - -static int mvpp2_probe(struct platform_device *pdev) -{ - const struct acpi_device_id *acpi_id; - struct fwnode_handle *fwnode = pdev->dev.fwnode; - struct fwnode_handle *port_fwnode; - struct mvpp2 *priv; - struct resource *res; - void __iomem *base; - int i; - int err; - - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - if (has_acpi_companion(&pdev->dev)) { - acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, - &pdev->dev); - priv->hw_version = (unsigned long)acpi_id->driver_data; - } else { - priv->hw_version = - (unsigned long)of_device_get_match_data(&pdev->dev); - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); - - if (priv->hw_version == MVPP21) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->lms_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->lms_base)) - return PTR_ERR(priv->lms_base); - } else { - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (has_acpi_companion(&pdev->dev)) { - /* In case the MDIO memory region is declared in - * the ACPI, it can already appear as 'in-use' - * in the OS. Because it is overlapped by second - * region of the network controller, make - * sure it is released, before requesting it again. - * The care is taken by mvpp2 driver to avoid - * concurrent access to this memory region. - */ - release_resource(res); - } - priv->iface_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->iface_base)) - return PTR_ERR(priv->iface_base); - } - - if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) { - priv->sysctrl_base = - syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "marvell,system-controller"); - if (IS_ERR(priv->sysctrl_base)) - /* The system controller regmap is optional for dt - * compatibility reasons. When not provided, the - * configuration of the GoP relies on the - * firmware/bootloader. - */ - priv->sysctrl_base = NULL; - } - - mvpp2_setup_bm_pool(); - - for (i = 0; i < MVPP2_MAX_THREADS; i++) { - u32 addr_space_sz; - - addr_space_sz = (priv->hw_version == MVPP21 ? 
- MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); - priv->swth_base[i] = base + i * addr_space_sz; - } - - if (priv->hw_version == MVPP21) - priv->max_port_rxqs = 8; - else - priv->max_port_rxqs = 32; - - if (dev_of_node(&pdev->dev)) { - priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); - if (IS_ERR(priv->pp_clk)) - return PTR_ERR(priv->pp_clk); - err = clk_prepare_enable(priv->pp_clk); - if (err < 0) - return err; - - priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); - if (IS_ERR(priv->gop_clk)) { - err = PTR_ERR(priv->gop_clk); - goto err_pp_clk; - } - err = clk_prepare_enable(priv->gop_clk); - if (err < 0) - goto err_pp_clk; - - if (priv->hw_version == MVPP22) { - priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); - if (IS_ERR(priv->mg_clk)) { - err = PTR_ERR(priv->mg_clk); - goto err_gop_clk; - } - - err = clk_prepare_enable(priv->mg_clk); - if (err < 0) - goto err_gop_clk; - - priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); - if (IS_ERR(priv->mg_core_clk)) { - priv->mg_core_clk = NULL; - } else { - err = clk_prepare_enable(priv->mg_core_clk); - if (err < 0) - goto err_mg_clk; - } - } - - priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); - if (IS_ERR(priv->axi_clk)) { - err = PTR_ERR(priv->axi_clk); - if (err == -EPROBE_DEFER) - goto err_mg_core_clk; - priv->axi_clk = NULL; - } else { - err = clk_prepare_enable(priv->axi_clk); - if (err < 0) - goto err_mg_core_clk; - } - - /* Get system's tclk rate */ - priv->tclk = clk_get_rate(priv->pp_clk); - } else if (device_property_read_u32(&pdev->dev, "clock-frequency", - &priv->tclk)) { - dev_err(&pdev->dev, "missing clock-frequency value\n"); - return -EINVAL; - } - - if (priv->hw_version == MVPP22) { - err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); - if (err) - goto err_axi_clk; - /* Sadly, the BM pools all share the same register to - * store the high 32 bits of their address. So they - * must all have the same high 32 bits, which forces - * us to restrict coherent memory to DMA_BIT_MASK(32). - */ - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) - goto err_axi_clk; - } - - /* Initialize network controller */ - err = mvpp2_init(pdev, priv); - if (err < 0) { - dev_err(&pdev->dev, "failed to initialize controller\n"); - goto err_axi_clk; - } - - /* Initialize ports */ - fwnode_for_each_available_child_node(fwnode, port_fwnode) { - err = mvpp2_port_probe(pdev, port_fwnode, priv); - if (err < 0) - goto err_port_probe; - } - - if (priv->port_count == 0) { - dev_err(&pdev->dev, "no ports enabled\n"); - err = -ENODEV; - goto err_axi_clk; - } - - /* Statistics must be gathered regularly because some of them (like - * packets counters) are 32-bit registers and could overflow quite - * quickly. For instance, a 10Gb link used at full bandwidth with the - * smallest packets (64B) will overflow a 32-bit counter in less than - * 30 seconds. Then, use a workqueue to fill 64-bit counters. - */ - snprintf(priv->queue_name, sizeof(priv->queue_name), - "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), - priv->port_count > 1 ? 
"+" : ""); - priv->stats_queue = create_singlethread_workqueue(priv->queue_name); - if (!priv->stats_queue) { - err = -ENOMEM; - goto err_port_probe; - } - - platform_set_drvdata(pdev, priv); - return 0; - -err_port_probe: - i = 0; - fwnode_for_each_available_child_node(fwnode, port_fwnode) { - if (priv->port_list[i]) - mvpp2_port_remove(priv->port_list[i]); - i++; - } -err_axi_clk: - clk_disable_unprepare(priv->axi_clk); - -err_mg_core_clk: - if (priv->hw_version == MVPP22) - clk_disable_unprepare(priv->mg_core_clk); -err_mg_clk: - if (priv->hw_version == MVPP22) - clk_disable_unprepare(priv->mg_clk); -err_gop_clk: - clk_disable_unprepare(priv->gop_clk); -err_pp_clk: - clk_disable_unprepare(priv->pp_clk); - return err; -} - -static int mvpp2_remove(struct platform_device *pdev) -{ - struct mvpp2 *priv = platform_get_drvdata(pdev); - struct fwnode_handle *fwnode = pdev->dev.fwnode; - struct fwnode_handle *port_fwnode; - int i = 0; - - flush_workqueue(priv->stats_queue); - destroy_workqueue(priv->stats_queue); - - fwnode_for_each_available_child_node(fwnode, port_fwnode) { - if (priv->port_list[i]) { - mutex_destroy(&priv->port_list[i]->gather_stats_lock); - mvpp2_port_remove(priv->port_list[i]); - } - i++; - } - - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { - struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; - - mvpp2_bm_pool_destroy(pdev, priv, bm_pool); - } - - for_each_present_cpu(i) { - struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; - - dma_free_coherent(&pdev->dev, - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, - aggr_txq->descs, - aggr_txq->descs_dma); - } - - if (is_acpi_node(port_fwnode)) - return 0; - - clk_disable_unprepare(priv->axi_clk); - clk_disable_unprepare(priv->mg_core_clk); - clk_disable_unprepare(priv->mg_clk); - clk_disable_unprepare(priv->pp_clk); - clk_disable_unprepare(priv->gop_clk); - - return 0; -} - -static const struct of_device_id mvpp2_match[] = { - { - .compatible = "marvell,armada-375-pp2", - .data = (void *)MVPP21, - }, - { - .compatible = "marvell,armada-7k-pp22", - .data = (void *)MVPP22, - }, - { } -}; -MODULE_DEVICE_TABLE(of, mvpp2_match); - -static const struct acpi_device_id mvpp2_acpi_match[] = { - { "MRVL0110", MVPP22 }, - { }, -}; -MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); - -static struct platform_driver mvpp2_driver = { - .probe = mvpp2_probe, - .remove = mvpp2_remove, - .driver = { - .name = MVPP2_DRIVER_NAME, - .of_match_table = mvpp2_match, - .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), - }, -}; - -module_platform_driver(mvpp2_driver); - -MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); -MODULE_AUTHOR("Marcin Wojtas "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile new file mode 100644 index 000000000000..4d11dd9e3246 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Marvell PPv2 driver. +# +obj-$(CONFIG_MVPP2) := mvpp2.o + +mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h new file mode 100644 index 000000000000..def00dc3eb4e --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -0,0 +1,1046 @@ +/* + * Definitions for Marvell PPv2 network controller for Armada 375 SoC. + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas + * + * This file is licensed under the terms of the GNU General Public + * License version 2. 
This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ +#ifndef _MVPP2_H_ +#define _MVPP2_H_ + +#include +#include +#include +#include + +/* Fifo Registers */ +#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) +#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) +#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 +#define MVPP2_RX_FIFO_INIT_REG 0x64 +#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port)) +#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) + +/* RX DMA Top Registers */ +#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) +#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) +#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) +#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) +#define MVPP2_POOL_BUF_SIZE_OFFSET 5 +#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) +#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff +#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) +#define MVPP2_RXQ_POOL_SHORT_OFFS 20 +#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 +#define MVPP2_RXQ_POOL_LONG_OFFS 24 +#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 +#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 +#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 +#define MVPP2_RXQ_DISABLE_MASK BIT(31) + +/* Top Registers */ +#define MVPP2_MH_REG(port) (0x5040 + 4 * (port)) +#define MVPP2_DSA_EXTENDED BIT(5) + +/* Parser Registers */ +#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 +#define MVPP2_PRS_PORT_LU_MAX 0xf +#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) +#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) +#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) +#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) +#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) +#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_TCAM_IDX_REG 0x1100 +#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) +#define MVPP2_PRS_TCAM_INV_MASK BIT(31) +#define MVPP2_PRS_SRAM_IDX_REG 0x1200 +#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) +#define MVPP2_PRS_TCAM_CTRL_REG 0x1230 +#define MVPP2_PRS_TCAM_EN_MASK BIT(0) + +/* RSS Registers */ +#define MVPP22_RSS_INDEX 0x1500 +#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) +#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) +#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) +#define MVPP22_RSS_TABLE_ENTRY 0x1508 +#define MVPP22_RSS_TABLE 0x1510 +#define MVPP22_RSS_TABLE_POINTER(p) (p) +#define MVPP22_RSS_WIDTH 0x150c + +/* Classifier Registers */ +#define MVPP2_CLS_MODE_REG 0x1800 +#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) +#define MVPP2_CLS_PORT_WAY_REG 0x1810 +#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) +#define MVPP2_CLS_LKP_INDEX_REG 0x1814 +#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 +#define MVPP2_CLS_LKP_TBL_REG 0x1818 +#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff +#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) +#define MVPP2_CLS_FLOW_INDEX_REG 0x1820 +#define MVPP2_CLS_FLOW_TBL0_REG 0x1824 +#define MVPP2_CLS_FLOW_TBL1_REG 0x1828 +#define MVPP2_CLS_FLOW_TBL2_REG 0x182c +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 +#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 
4)) +#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 +#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) + +/* Descriptor Manager Top Registers */ +#define MVPP2_RXQ_NUM_REG 0x2040 +#define MVPP2_RXQ_DESC_ADDR_REG 0x2044 +#define MVPP22_DESC_ADDR_OFFS 8 +#define MVPP2_RXQ_DESC_SIZE_REG 0x2048 +#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) +#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 +#define MVPP2_RXQ_NUM_NEW_OFFSET 16 +#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) +#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff +#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 +#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 +#define MVPP2_RXQ_THRESH_REG 0x204c +#define MVPP2_OCCUPIED_THRESH_OFFSET 0 +#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff +#define MVPP2_RXQ_INDEX_REG 0x2050 +#define MVPP2_TXQ_NUM_REG 0x2080 +#define MVPP2_TXQ_DESC_ADDR_REG 0x2084 +#define MVPP2_TXQ_DESC_SIZE_REG 0x2088 +#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_TXQ_THRESH_REG 0x2094 +#define MVPP2_TXQ_THRESH_OFFSET 16 +#define MVPP2_TXQ_THRESH_MASK 0x3fff +#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 +#define MVPP2_TXQ_INDEX_REG 0x2098 +#define MVPP2_TXQ_PREF_BUF_REG 0x209c +#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) +#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) +#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) +#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) +#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) +#define MVPP2_TXQ_PENDING_REG 0x20a0 +#define MVPP2_TXQ_PENDING_MASK 0x3fff +#define MVPP2_TXQ_INT_STATUS_REG 0x20a4 +#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) +#define MVPP2_TRANSMITTED_COUNT_OFFSET 16 +#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 +#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 +#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 +#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 +#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff +#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 +#define MVPP2_TXQ_RSVD_CLR_OFFSET 16 +#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) +#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 +#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff +#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) + +/* MBUS bridge registers */ +#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) +#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) +#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) +#define MVPP2_BASE_ADDR_ENABLE 0x4060 + +/* AXI Bridge Registers */ +#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 +#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 +#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 +#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 +#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 +#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c +#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 +#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 +#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 +#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 +#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 +#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 + +/* Values for AXI Bridge registers */ +#define MVPP22_AXI_ATTR_CACHE_OFFS 0 +#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 + +#define MVPP22_AXI_CODE_CACHE_OFFS 0 +#define MVPP22_AXI_CODE_DOMAIN_OFFS 4 + +#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 +#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 +#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb + +#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 +#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 + 
+/* Interrupt Cause and Mask registers */ +#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) +#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 + +#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) +#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 +#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) + +#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 + +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 + +#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 +#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 + +#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) +#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) +#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) +#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) +#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 +#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) +#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) +#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) +#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) +#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) +#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) +#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc +#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 +#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 + +/* Buffer Manager registers */ +#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) +#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 +#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) +#define MVPP2_BM_POOL_SIZE_MASK 0xfff0 +#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) +#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 +#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) +#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 +#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff +#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8 +#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) +#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) +#define MVPP2_BM_START_MASK BIT(0) +#define MVPP2_BM_STOP_MASK BIT(1) +#define MVPP2_BM_STATE_MASK BIT(4) +#define MVPP2_BM_LOW_THRESH_OFFS 8 +#define MVPP2_BM_LOW_THRESH_MASK 0x7f00 +#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_LOW_THRESH_OFFS) +#define MVPP2_BM_HIGH_THRESH_OFFS 16 +#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 +#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_HIGH_THRESH_OFFS) +#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) +#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) +#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) +#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) +#define MVPP2_BM_BPPE_FULL_MASK BIT(3) +#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) +#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) +#define 
MVPP2_BM_VIRT_ALLOC_REG 0x6440 +#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444 +#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8 +#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) +#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) +#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) +#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) +#define MVPP2_BM_VIRT_RLS_REG 0x64c0 +#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 +#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 + +/* TX Scheduler registers */ +#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 +#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 +#define MVPP2_TXP_SCHED_ENQ_MASK 0xff +#define MVPP2_TXP_SCHED_DISQ_OFFSET 8 +#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 +#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 +#define MVPP2_TXP_SCHED_MTU_REG 0x801c +#define MVPP2_TXP_MTU_MAX 0x7FFFF +#define MVPP2_TXP_SCHED_REFILL_REG 0x8020 +#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 +#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff +#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) +#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff +#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff + +/* TX general registers */ +#define MVPP2_TX_SNOOP_REG 0x8800 +#define MVPP2_TX_PORT_FLUSH_REG 0x8810 +#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) + +/* LMS registers */ +#define MVPP2_SRC_ADDR_MIDDLE 0x24 +#define MVPP2_SRC_ADDR_HIGH 0x28 +#define MVPP2_PHY_AN_CFG0_REG 0x34 +#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) +#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c +#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 + +/* Per-port registers */ +#define MVPP2_GMAC_CTRL_0_REG 0x0 +#define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) +#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 +#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) +#define MVPP2_GMAC_CTRL_1_REG 0x4 +#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) +#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) +#define MVPP2_GMAC_PCS_LB_EN_BIT 6 +#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) +#define MVPP2_GMAC_SA_LOW_OFFS 7 +#define MVPP2_GMAC_CTRL_2_REG 0x8 +#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) +#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) +#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) +#define MVPP2_GMAC_DISABLE_PADDING BIT(5) +#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) +#define MVPP2_GMAC_AUTONEG_CONFIG 0xc +#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) +#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) +#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4) +#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVPP2_GMAC_AN_SPEED_EN BIT(7) +#define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10) +#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) +#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVPP2_GMAC_AN_DUPLEX_EN 
BIT(13) +#define MVPP2_GMAC_STATUS0 0x10 +#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0) +#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1) +#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2) +#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3) +#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6) +#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7) +#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11) +#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c +#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ + MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) +#define MVPP22_GMAC_INT_STAT 0x20 +#define MVPP22_GMAC_INT_STAT_LINK BIT(1) +#define MVPP22_GMAC_INT_MASK 0x24 +#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1) +#define MVPP22_GMAC_CTRL_4_REG 0x90 +#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) +#define MVPP22_CTRL4_RX_FC_EN BIT(3) +#define MVPP22_CTRL4_TX_FC_EN BIT(4) +#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) +#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) +#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) +#define MVPP22_GMAC_INT_SUM_MASK 0xa4 +#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) + +/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, + * relative to port->base. + */ +#define MVPP22_XLG_CTRL0_REG 0x100 +#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) +#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) +#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) +#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8) +#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) +#define MVPP22_XLG_CTRL1_REG 0x104 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff +#define MVPP22_XLG_STATUS 0x10c +#define MVPP22_XLG_STATUS_LINK_UP BIT(0) +#define MVPP22_XLG_INT_STAT 0x114 +#define MVPP22_XLG_INT_STAT_LINK BIT(1) +#define MVPP22_XLG_INT_MASK 0x118 +#define MVPP22_XLG_INT_MASK_LINK BIT(1) +#define MVPP22_XLG_CTRL3_REG 0x11c +#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) +#define MVPP22_XLG_EXT_INT_MASK 0x15c +#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1) +#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2) +#define MVPP22_XLG_CTRL4_REG 0x184 +#define MVPP22_XLG_CTRL4_FWD_FC BIT(5) +#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) +#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) +#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14) + +/* SMI registers. PPv2.2 only, relative to priv->iface_base. */ +#define MVPP22_SMI_MISC_CFG_REG 0x1204 +#define MVPP22_SMI_POLLING_EN BIT(10) + +#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) + +#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff + +/* Descriptor ring Macros */ +#define MVPP2_QUEUE_NEXT_DESC(q, index) \ + (((index) < (q)->last_desc) ? ((index) + 1) : 0) + +/* XPCS registers. PPv2.2 only */ +#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000) +#define MVPP22_MPCS_CTRL 0x14 +#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10) +#define MVPP22_MPCS_CLK_RESET 0x14c +#define MAC_CLK_RESET_SD_TX BIT(0) +#define MAC_CLK_RESET_SD_RX BIT(1) +#define MAC_CLK_RESET_MAC BIT(2) +#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) +#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11) + +/* XPCS registers. PPv2.2 only */ +#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) +#define MVPP22_XPCS_CFG0 0x0 +#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) +#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) + +/* System controller registers. Accessed through a regmap. 
*/ +#define GENCONF_SOFT_RESET1 0x1108 +#define GENCONF_SOFT_RESET1_GOP BIT(6) +#define GENCONF_PORT_CTRL0 0x1110 +#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1) +#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29) +#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31) +#define GENCONF_PORT_CTRL1 0x1114 +#define GENCONF_PORT_CTRL1_EN(p) BIT(p) +#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28) +#define GENCONF_CTRL0 0x1120 +#define GENCONF_CTRL0_PORT0_RGMII BIT(0) +#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1) +#define GENCONF_CTRL0_PORT1_RGMII BIT(2) + +/* Various constants */ + +/* Coalescing */ +#define MVPP2_TXDONE_COAL_PKTS_THRESH 64 +#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL +#define MVPP2_TXDONE_COAL_USEC 1000 +#define MVPP2_RX_COAL_PKTS 32 +#define MVPP2_RX_COAL_USEC 64 + +/* The two-byte Marvell header. Either contains a special value used + * by Marvell switches when a specific hardware mode is enabled (not + * supported by this driver) or is filled automatically with zeroes on + * the RX side. Those two bytes being at the front of the Ethernet + * header, they allow the IP header to be aligned on a 4-byte + * boundary automatically: the hardware skips those two bytes on its + * own. + */ +#define MVPP2_MH_SIZE 2 +#define MVPP2_ETH_TYPE_LEN 2 +#define MVPP2_PPPOE_HDR_SIZE 8 +#define MVPP2_VLAN_TAG_LEN 4 +#define MVPP2_VLAN_TAG_EDSA_LEN 8 + +/* Lbtd 802.3 type */ +#define MVPP2_IP_LBDT_TYPE 0xfffa + +#define MVPP2_TX_CSUM_MAX_SIZE 9800 + +/* Timeout constants */ +#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 +#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 + +#define MVPP2_TX_MTU_MAX 0x7ffff + +/* Maximum number of T-CONTs of PON port */ +#define MVPP2_MAX_TCONT 16 + +/* Maximum number of supported ports */ +#define MVPP2_MAX_PORTS 4 + +/* Maximum number of TXQs used by single port */ +#define MVPP2_MAX_TXQ 8 + +/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO + * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data), + * multiply this value by two to count the maximum number of skb descs needed. 
+ */ +#define MVPP2_MAX_TSO_SEGS 300 +#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +/* Default number of RXQs in use */ +#define MVPP2_DEFAULT_RXQ 4 + +/* Max number of Rx descriptors */ +#define MVPP2_MAX_RXD_MAX 1024 +#define MVPP2_MAX_RXD_DFLT 128 + +/* Max number of Tx descriptors */ +#define MVPP2_MAX_TXD_MAX 2048 +#define MVPP2_MAX_TXD_DFLT 1024 + +/* Amount of Tx descriptors that can be reserved at once by CPU */ +#define MVPP2_CPU_DESC_CHUNK 64 + +/* Max number of Tx descriptors in each aggregated queue */ +#define MVPP2_AGGR_TXQ_SIZE 256 + +/* Descriptor aligned size */ +#define MVPP2_DESC_ALIGNED_SIZE 32 + +/* Descriptor alignment mask */ +#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) + +/* RX FIFO constants */ +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 +#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 + +/* TX FIFO constants */ +#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa +#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 +#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 +#define MVPP2_TX_FIFO_THRESHOLD_10KB \ + (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) +#define MVPP2_TX_FIFO_THRESHOLD_3KB \ + (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) + +/* RX buffer constants */ +#define MVPP2_SKB_SHINFO_SIZE \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + +#define MVPP2_RX_PKT_SIZE(mtu) \ + ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ + ETH_HLEN + ETH_FCS_LEN, cache_line_size()) + +#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) +#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) +#define MVPP2_RX_MAX_PKT_SIZE(total_size) \ + ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) + +#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) + +/* IPv6 max L3 address size */ +#define MVPP2_MAX_L3_ADDR_SIZE 16 + +/* Port flags */ +#define MVPP2_F_LOOPBACK BIT(0) + +/* Marvell tag types */ +enum mvpp2_tag_type { + MVPP2_TAG_TYPE_NONE = 0, + MVPP2_TAG_TYPE_MH = 1, + MVPP2_TAG_TYPE_DSA = 2, + MVPP2_TAG_TYPE_EDSA = 3, + MVPP2_TAG_TYPE_VLAN = 4, + MVPP2_TAG_TYPE_LAST = 5 +}; + +/* L2 cast enum */ +enum mvpp2_prs_l2_cast { + MVPP2_PRS_L2_UNI_CAST, + MVPP2_PRS_L2_MULTI_CAST, +}; + +/* L3 cast enum */ +enum mvpp2_prs_l3_cast { + MVPP2_PRS_L3_UNI_CAST, + MVPP2_PRS_L3_MULTI_CAST, + MVPP2_PRS_L3_BROAD_CAST +}; + +/* BM constants */ +#define MVPP2_BM_JUMBO_BUF_NUM 512 +#define MVPP2_BM_LONG_BUF_NUM 1024 +#define MVPP2_BM_SHORT_BUF_NUM 2048 +#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) +#define MVPP2_BM_POOL_PTR_ALIGN 128 + +/* BM cookie (32 bits) definition */ +#define MVPP2_BM_COOKIE_POOL_OFFS 8 +#define MVPP2_BM_COOKIE_CPU_OFFS 24 + +#define MVPP2_BM_SHORT_FRAME_SIZE 512 +#define MVPP2_BM_LONG_FRAME_SIZE 2048 +#define MVPP2_BM_JUMBO_FRAME_SIZE 10240 +/* BM short pool packet size + * These values ensure that for SWF the total number + * of bytes allocated for each buffer will be 512 + */ +#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE) +#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE) +#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE) + +#define MVPP21_ADDR_SPACE_SZ 0 +#define MVPP22_ADDR_SPACE_SZ SZ_64K + +#define MVPP2_MAX_THREADS 8 +#define 
MVPP2_MAX_QVECS MVPP2_MAX_THREADS + +/* GMAC MIB Counters register definitions */ +#define MVPP21_MIB_COUNTERS_OFFSET 0x1000 +#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 +#define MVPP22_MIB_COUNTERS_OFFSET 0x0 +#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100 + +#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0 +#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8 +#define MVPP2_MIB_CRC_ERRORS_SENT 0xc +#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10 +#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18 +#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c +#define MVPP2_MIB_FRAMES_64_OCTETS 0x20 +#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24 +#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28 +#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c +#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30 +#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 +#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38 +#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40 +#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48 +#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c +#define MVPP2_MIB_FC_SENT 0x54 +#define MVPP2_MIB_FC_RCVD 0x58 +#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c +#define MVPP2_MIB_UNDERSIZE_RCVD 0x60 +#define MVPP2_MIB_FRAGMENTS_RCVD 0x64 +#define MVPP2_MIB_OVERSIZE_RCVD 0x68 +#define MVPP2_MIB_JABBER_RCVD 0x6c +#define MVPP2_MIB_MAC_RCV_ERROR 0x70 +#define MVPP2_MIB_BAD_CRC_EVENT 0x74 +#define MVPP2_MIB_COLLISION 0x78 +#define MVPP2_MIB_LATE_COLLISION 0x7c + +#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) + +#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) + +/* Definitions */ + +/* Shared Packet Processor resources */ +struct mvpp2 { + /* Shared registers' base addresses */ + void __iomem *lms_base; + void __iomem *iface_base; + + /* On PPv2.2, each "software thread" can access the base + * register through a separate address space, each 64 KB apart + * from each other. Typically, such address spaces will be + * used per CPU. + */ + void __iomem *swth_base[MVPP2_MAX_THREADS]; + + /* On PPv2.2, some port control registers are located into the system + * controller space. These registers are accessible through a regmap. 
+ */ + struct regmap *sysctrl_base; + + /* Common clocks */ + struct clk *pp_clk; + struct clk *gop_clk; + struct clk *mg_clk; + struct clk *mg_core_clk; + struct clk *axi_clk; + + /* List of pointers to port structures */ + int port_count; + struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; + + /* Aggregated TXQs */ + struct mvpp2_tx_queue *aggr_txqs; + + /* BM pools */ + struct mvpp2_bm_pool *bm_pools; + + /* PRS shadow table */ + struct mvpp2_prs_shadow *prs_shadow; + /* PRS auxiliary table for double vlan entries control */ + bool *prs_double_vlans; + + /* Tclk value */ + u32 tclk; + + /* HW version */ + enum { MVPP21, MVPP22 } hw_version; + + /* Maximum number of RXQs per port */ + unsigned int max_port_rxqs; + + /* Workqueue to gather hardware statistics */ + char queue_name[30]; + struct workqueue_struct *stats_queue; +}; + +struct mvpp2_pcpu_stats { + struct u64_stats_sync syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; +}; + +/* Per-CPU port control */ +struct mvpp2_port_pcpu { + struct hrtimer tx_done_timer; + bool timer_scheduled; + /* Tasklet for egress finalization */ + struct tasklet_struct tx_done_tasklet; +}; + +struct mvpp2_queue_vector { + int irq; + struct napi_struct napi; + enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type; + int sw_thread_id; + u16 sw_thread_mask; + int first_rxq; + int nrxqs; + u32 pending_cause_rx; + struct mvpp2_port *port; +}; + +struct mvpp2_port { + u8 id; + + /* Index of the port from the "group of ports" complex point + * of view + */ + int gop_id; + + int link_irq; + + struct mvpp2 *priv; + + /* Firmware node associated to the port */ + struct fwnode_handle *fwnode; + + /* Is a PHY always connected to the port */ + bool has_phy; + + /* Per-port registers' base address */ + void __iomem *base; + void __iomem *stats_base; + + struct mvpp2_rx_queue **rxqs; + unsigned int nrxqs; + struct mvpp2_tx_queue **txqs; + unsigned int ntxqs; + struct net_device *dev; + + int pkt_size; + + /* Per-CPU port control */ + struct mvpp2_port_pcpu __percpu *pcpu; + + /* Flags */ + unsigned long flags; + + u16 tx_ring_size; + u16 rx_ring_size; + struct mvpp2_pcpu_stats __percpu *stats; + u64 *ethtool_stats; + + /* Per-port work and its lock to gather hardware statistics */ + struct mutex gather_stats_lock; + struct delayed_work stats_work; + + struct device_node *of_node; + + phy_interface_t phy_interface; + struct phylink *phylink; + struct phy *comphy; + + struct mvpp2_bm_pool *pool_long; + struct mvpp2_bm_pool *pool_short; + + /* Index of first port's physical RXQ */ + u8 first_rxq; + + struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; + unsigned int nqvecs; + bool has_tx_irqs; + + u32 tx_time_coal; +}; + +/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the + * layout of the transmit and reception DMA descriptors, and their + * layout is therefore defined by the hardware design + */ + +#define MVPP2_TXD_L3_OFF_SHIFT 0 +#define MVPP2_TXD_IP_HLEN_SHIFT 8 +#define MVPP2_TXD_L4_CSUM_FRAG BIT(13) +#define MVPP2_TXD_L4_CSUM_NOT BIT(14) +#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) +#define MVPP2_TXD_PADDING_DISABLE BIT(23) +#define MVPP2_TXD_L4_UDP BIT(24) +#define MVPP2_TXD_L3_IP6 BIT(26) +#define MVPP2_TXD_L_DESC BIT(28) +#define MVPP2_TXD_F_DESC BIT(29) + +#define MVPP2_RXD_ERR_SUMMARY BIT(15) +#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) +#define MVPP2_RXD_ERR_CRC 0x0 +#define MVPP2_RXD_ERR_OVERRUN BIT(13) +#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) +#define MVPP2_RXD_BM_POOL_ID_OFFS 16 +#define 
MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) +#define MVPP2_RXD_HWF_SYNC BIT(21) +#define MVPP2_RXD_L4_CSUM_OK BIT(22) +#define MVPP2_RXD_IP4_HEADER_ERR BIT(24) +#define MVPP2_RXD_L4_TCP BIT(25) +#define MVPP2_RXD_L4_UDP BIT(26) +#define MVPP2_RXD_L3_IP4 BIT(28) +#define MVPP2_RXD_L3_IP6 BIT(30) +#define MVPP2_RXD_BUF_HDR BIT(31) + +/* HW TX descriptor for PPv2.1 */ +struct mvpp21_tx_desc { + u32 command; /* Options used by HW for packet transmitting.*/ + u8 packet_offset; /* the offset from the buffer beginning */ + u8 phys_txq; /* destination queue ID */ + u16 data_size; /* data size of transmitted packet in bytes */ + u32 buf_dma_addr; /* physical addr of transmitted buffer */ + u32 buf_cookie; /* cookie for access to TX buffer in tx path */ + u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + u32 reserved2; /* reserved (for future use) */ +}; + +/* HW RX descriptor for PPv2.1 */ +struct mvpp21_rx_desc { + u32 status; /* info about received packet */ + u16 reserved1; /* parser_info (for future use, PnC) */ + u16 data_size; /* size of received packet in bytes */ + u32 buf_dma_addr; /* physical address of the buffer */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + u16 reserved2; /* gem_port_id (for future use, PON) */ + u16 reserved3; /* csum_l4 (for future use, PnC) */ + u8 reserved4; /* bm_qset (for future use, BM) */ + u8 reserved5; + u16 reserved6; /* classify_info (for future use, PnC) */ + u32 reserved7; /* flow_id (for future use, PnC) */ + u32 reserved8; +}; + +/* HW TX descriptor for PPv2.2 */ +struct mvpp22_tx_desc { + u32 command; + u8 packet_offset; + u8 phys_txq; + u16 data_size; + u64 reserved1; + u64 buf_dma_addr_ptp; + u64 buf_cookie_misc; +}; + +/* HW RX descriptor for PPv2.2 */ +struct mvpp22_rx_desc { + u32 status; + u16 reserved1; + u16 data_size; + u32 reserved2; + u32 reserved3; + u64 buf_dma_addr_key_hash; + u64 buf_cookie_misc; +}; + +/* Opaque type used by the driver to manipulate the HW TX and RX + * descriptors + */ +struct mvpp2_tx_desc { + union { + struct mvpp21_tx_desc pp21; + struct mvpp22_tx_desc pp22; + }; +}; + +struct mvpp2_rx_desc { + union { + struct mvpp21_rx_desc pp21; + struct mvpp22_rx_desc pp22; + }; +}; + +struct mvpp2_txq_pcpu_buf { + /* Transmitted SKB */ + struct sk_buff *skb; + + /* Physical address of transmitted buffer */ + dma_addr_t dma; + + /* Size transmitted */ + size_t size; +}; + +/* Per-CPU Tx queue control */ +struct mvpp2_txq_pcpu { + int cpu; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the + * descriptor ring + */ + int count; + + int wake_threshold; + int stop_threshold; + + /* Number of Tx DMA descriptors reserved for each CPU */ + int reserved_num; + + /* Infos about transmitted buffers */ + struct mvpp2_txq_pcpu_buf *buffs; + + /* Index of last TX DMA descriptor that was inserted */ + int txq_put_index; + + /* Index of the TX DMA descriptor to be cleaned up */ + int txq_get_index; + + /* DMA buffer for TSO headers */ + char *tso_headers; + dma_addr_t tso_headers_dma; +}; + +struct mvpp2_tx_queue { + /* Physical number of this Tx queue */ + u8 id; + + /* Logical number of this Tx queue */ + u8 log_id; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the descriptor ring */ + int count; + + /* Per-CPU control of physical Tx queues */ + struct mvpp2_txq_pcpu __percpu *pcpu; + + u32 done_pkts_coal; + + /* Virtual address of 
the Tx DMA descriptors array */ + struct mvpp2_tx_desc *descs; + + /* DMA address of the Tx DMA descriptors array */ + dma_addr_t descs_dma; + + /* Index of the last Tx DMA descriptor */ + int last_desc; + + /* Index of the next Tx DMA descriptor to process */ + int next_desc_to_proc; +}; + +struct mvpp2_rx_queue { + /* RX queue number, in the range 0-31 for physical RXQs */ + u8 id; + + /* Num of rx descriptors in the rx descriptor ring */ + int size; + + u32 pkts_coal; + u32 time_coal; + + /* Virtual address of the RX DMA descriptors array */ + struct mvpp2_rx_desc *descs; + + /* DMA address of the RX DMA descriptors array */ + dma_addr_t descs_dma; + + /* Index of the last RX DMA descriptor */ + int last_desc; + + /* Index of the next RX DMA descriptor to process */ + int next_desc_to_proc; + + /* ID of port to which physical RXQ is mapped */ + int port; + + /* Port's logic RXQ number to which physical RXQ is mapped */ + int logic_rxq; +}; + +struct mvpp2_bm_pool { + /* Pool number in the range 0-7 */ + int id; + + /* Buffer Pointers Pool External (BPPE) size */ + int size; + /* BPPE size in bytes */ + int size_bytes; + /* Number of buffers for this pool */ + int buf_num; + /* Pool buffer size */ + int buf_size; + /* Packet size */ + int pkt_size; + int frag_size; + + /* BPPE virtual base address */ + u32 *virt_addr; + /* BPPE DMA base address */ + dma_addr_t dma_addr; + + /* Ports using BM pool */ + u32 port_map; +}; + +#define IS_TSO_HEADER(txq_pcpu, addr) \ + ((addr) >= (txq_pcpu)->tso_headers_dma && \ + (addr) < (txq_pcpu)->tso_headers_dma + \ + (txq_pcpu)->size * TSO_HEADER_SIZE) + +#define MVPP2_DRIVER_NAME "mvpp2" +#define MVPP2_DRIVER_VERSION "1.0" + +void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data); +u32 mvpp2_read(struct mvpp2 *priv, u32 offset); + +u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset); + +void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data); +u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset); + +void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, + u32 data); + +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c new file mode 100644 index 000000000000..8581d5b17dd5 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -0,0 +1,141 @@ +/* + * RSS and Classifier helpers for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include "mvpp2.h" +#include "mvpp2_cls.h" + +/* Update classification flow table registers */ +static void mvpp2_cls_flow_write(struct mvpp2 *priv, + struct mvpp2_cls_flow_entry *fe) +{ + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); +} + +/* Update classification lookup table register */ +static void mvpp2_cls_lookup_write(struct mvpp2 *priv, + struct mvpp2_cls_lookup_entry *le) +{ + u32 val; + + val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); +} + +/* Classifier default initialization */ +void mvpp2_cls_init(struct mvpp2 *priv) +{ + struct mvpp2_cls_lookup_entry le; + struct mvpp2_cls_flow_entry fe; + int index; + + /* Enable classifier */ + mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); + + /* Clear classifier flow table */ + memset(&fe.data, 0, sizeof(fe.data)); + for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { + fe.index = index; + mvpp2_cls_flow_write(priv, &fe); + } + + /* Clear classifier lookup table */ + le.data = 0; + for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { + le.lkpid = index; + le.way = 0; + mvpp2_cls_lookup_write(priv, &le); + + le.way = 1; + mvpp2_cls_lookup_write(priv, &le); + } +} + +void mvpp2_cls_port_config(struct mvpp2_port *port) +{ + struct mvpp2_cls_lookup_entry le; + u32 val; + + /* Set way for the port */ + val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); + val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); + + /* Pick the entry to be accessed in lookup ID decoding table + * according to the way and lkpid. + */ + le.lkpid = port->id; + le.way = 0; + le.data = 0; + + /* Set initial CPU queue for receiving packets */ + le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; + le.data |= port->first_rxq; + + /* Disable classification engines */ + le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + /* Update lookup ID table entry */ + mvpp2_cls_lookup_write(port->priv, &le); +} + +/* Set CPU queue number for oversize packets */ +void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) +{ + u32 val; + + mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), + port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); + + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), + (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); + + val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); + val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); +} + +void mvpp22_init_rss(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int i; + + /* Set the table width: replace the whole classifier Rx queue number + * with the ones configured in RSS table entries. + */ + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); + mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); + + /* Loop through the classifier Rx Queues and map them to a RSS table. + * Map them all to the first table (0) by default. + */ + for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); + mvpp2_write(priv, MVPP22_RSS_TABLE, + MVPP22_RSS_TABLE_POINTER(0)); + } + + /* Configure the first table to evenly distribute the packets across + * real Rx Queues. 
The table entries map a hash to an port Rx Queue. + */ + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { + u32 sel = MVPP22_RSS_INDEX_TABLE(0) | + MVPP22_RSS_INDEX_TABLE_ENTRY(i); + mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + + mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); + } + +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h new file mode 100644 index 000000000000..8e1d7f9ffa0b --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -0,0 +1,44 @@ +/* + * RSS and Classifier definitions for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _MVPP2_CLS_H_ +#define _MVPP2_CLS_H_ + +/* Classifier constants */ +#define MVPP2_CLS_FLOWS_TBL_SIZE 512 +#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 +#define MVPP2_CLS_LKP_TBL_SIZE 64 +#define MVPP2_CLS_RX_QUEUES 256 + +/* RSS constants */ +#define MVPP22_RSS_TABLE_ENTRIES 32 + +struct mvpp2_cls_flow_entry { + u32 index; + u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; +}; + +struct mvpp2_cls_lookup_entry { + u32 lkpid; + u32 way; + u32 data; +}; + +void mvpp22_init_rss(struct mvpp2_port *port); + +void mvpp2_cls_init(struct mvpp2 *priv); + +void mvpp2_cls_port_config(struct mvpp2_port *port); + +void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port); + +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c new file mode 100644 index 000000000000..45622bffd81a --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -0,0 +1,5281 @@ +/* + * Driver for Marvell PPv2 network controller for Armada 375 SoC. + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mvpp2.h" +#include "mvpp2_prs.h" +#include "mvpp2_cls.h" + +enum mvpp2_bm_pool_log_num { + MVPP2_BM_SHORT, + MVPP2_BM_LONG, + MVPP2_BM_JUMBO, + MVPP2_BM_POOLS_NUM +}; + +static struct { + int pkt_size; + int buf_num; +} mvpp2_pools[MVPP2_BM_POOLS_NUM]; + +/* The prototype is added here to be used in start_dev when using ACPI. This + * will be removed once phylink is used for all modes (dt+ACPI). 
+ */ +static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, + const struct phylink_link_state *state); + +/* Queue modes */ +#define MVPP2_QDIST_SINGLE_MODE 0 +#define MVPP2_QDIST_MULTI_MODE 1 + +static int queue_mode = MVPP2_QDIST_SINGLE_MODE; + +module_param(queue_mode, int, 0444); +MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); + +/* Utility/helper methods */ + +void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) +{ + writel(data, priv->swth_base[0] + offset); +} + +u32 mvpp2_read(struct mvpp2 *priv, u32 offset) +{ + return readl(priv->swth_base[0] + offset); +} + +u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) +{ + return readl_relaxed(priv->swth_base[0] + offset); +} +/* These accessors should be used to access: + * + * - per-CPU registers, where each CPU has its own copy of the + * register. + * + * MVPP2_BM_VIRT_ALLOC_REG + * MVPP2_BM_ADDR_HIGH_ALLOC + * MVPP22_BM_ADDR_HIGH_RLS_REG + * MVPP2_BM_VIRT_RLS_REG + * MVPP2_ISR_RX_TX_CAUSE_REG + * MVPP2_ISR_RX_TX_MASK_REG + * MVPP2_TXQ_NUM_REG + * MVPP2_AGGR_TXQ_UPDATE_REG + * MVPP2_TXQ_RSVD_REQ_REG + * MVPP2_TXQ_RSVD_RSLT_REG + * MVPP2_TXQ_SENT_REG + * MVPP2_RXQ_NUM_REG + * + * - global registers that must be accessed through a specific CPU + * window, because they are related to an access to a per-CPU + * register + * + * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) + * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG) + * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) + */ +void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, + u32 offset, u32 data) +{ + writel(data, priv->swth_base[cpu] + offset); +} + +u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, + u32 offset) +{ + return readl(priv->swth_base[cpu] + offset); +} + +void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, + u32 offset, u32 data) +{ + writel_relaxed(data, priv->swth_base[cpu] + offset); +} + +u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, + u32 offset) +{ + return readl_relaxed(priv->swth_base[cpu] + offset); +} + +static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return tx_desc->pp21.buf_dma_addr; + else + return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; +} + +static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + dma_addr_t dma_addr) +{ + dma_addr_t addr, offset; + + addr = dma_addr & ~MVPP2_TX_DESC_ALIGN; + offset = dma_addr & MVPP2_TX_DESC_ALIGN; + + if (port->priv->hw_version == MVPP21) { + tx_desc->pp21.buf_dma_addr = addr; + tx_desc->pp21.packet_offset = offset; + } else { + u64 val = (u64)addr; + + tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; + tx_desc->pp22.buf_dma_addr_ptp |= val; + tx_desc->pp22.packet_offset = offset; + } +} + +static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if 
(port->priv->hw_version == MVPP21) + return tx_desc->pp21.data_size; + else + return tx_desc->pp22.data_size; +} + +static void mvpp2_txdesc_size_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + size_t size) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.data_size = size; + else + tx_desc->pp22.data_size = size; +} + +static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int txq) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.phys_txq = txq; + else + tx_desc->pp22.phys_txq = txq; +} + +static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int command) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.command = command; + else + tx_desc->pp22.command = command; +} + +static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return tx_desc->pp21.packet_offset; + else + return tx_desc->pp22.packet_offset; +} + +static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.buf_dma_addr; + else + return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; +} + +static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.buf_cookie; + else + return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; +} + +static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.data_size; + else + return rx_desc->pp22.data_size; +} + +static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.status; + else + return rx_desc->pp22.status; +} + +static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) +{ + txq_pcpu->txq_get_index++; + if (txq_pcpu->txq_get_index == txq_pcpu->size) + txq_pcpu->txq_get_index = 0; +} + +static void mvpp2_txq_inc_put(struct mvpp2_port *port, + struct mvpp2_txq_pcpu *txq_pcpu, + struct sk_buff *skb, + struct mvpp2_tx_desc *tx_desc) +{ + struct mvpp2_txq_pcpu_buf *tx_buf = + txq_pcpu->buffs + txq_pcpu->txq_put_index; + tx_buf->skb = skb; + tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); + tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + + mvpp2_txdesc_offset_get(port, tx_desc); + txq_pcpu->txq_put_index++; + if (txq_pcpu->txq_put_index == txq_pcpu->size) + txq_pcpu->txq_put_index = 0; +} + +/* Get number of physical egress port */ +static inline int mvpp2_egress_port(struct mvpp2_port *port) +{ + return MVPP2_MAX_TCONT + port->id; +} + +/* Get number of physical TXQ */ +static inline int mvpp2_txq_phys(int port, int txq) +{ + return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; +} + +static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) +{ + if (likely(pool->frag_size <= PAGE_SIZE)) + return netdev_alloc_frag(pool->frag_size); + else + return kmalloc(pool->frag_size, GFP_ATOMIC); +} + +static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) +{ + if (likely(pool->frag_size <= PAGE_SIZE)) + skb_free_frag(data); + else + kfree(data); +} + +/* Buffer Manager configuration routines */ + +/* Create pool */ +static int mvpp2_bm_pool_create(struct platform_device *pdev, + struct mvpp2 *priv, + struct 
mvpp2_bm_pool *bm_pool, int size) +{ + u32 val; + + /* Number of buffer pointers must be a multiple of 16, as per + * hardware constraints + */ + if (!IS_ALIGNED(size, 16)) + return -EINVAL; + + /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16 + * bytes per buffer pointer + */ + if (priv->hw_version == MVPP21) + bm_pool->size_bytes = 2 * sizeof(u32) * size; + else + bm_pool->size_bytes = 2 * sizeof(u64) * size; + + bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes, + &bm_pool->dma_addr, + GFP_KERNEL); + if (!bm_pool->virt_addr) + return -ENOMEM; + + if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, + MVPP2_BM_POOL_PTR_ALIGN)) { + dma_free_coherent(&pdev->dev, bm_pool->size_bytes, + bm_pool->virt_addr, bm_pool->dma_addr); + dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", + bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); + return -ENOMEM; + } + + mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), + lower_32_bits(bm_pool->dma_addr)); + mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); + val |= MVPP2_BM_START_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + + bm_pool->size = size; + bm_pool->pkt_size = 0; + bm_pool->buf_num = 0; + + return 0; +} + +/* Set pool buffer size */ +static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, + int buf_size) +{ + u32 val; + + bm_pool->buf_size = buf_size; + + val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); + mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); +} + +static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, + dma_addr_t *dma_addr, + phys_addr_t *phys_addr) +{ + int cpu = get_cpu(); + + *dma_addr = mvpp2_percpu_read(priv, cpu, + MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); + *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); + + if (priv->hw_version == MVPP22) { + u32 val; + u32 dma_addr_highbits, phys_addr_highbits; + + val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); + dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); + phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> + MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; + + if (sizeof(dma_addr_t) == 8) + *dma_addr |= (u64)dma_addr_highbits << 32; + + if (sizeof(phys_addr_t) == 8) + *phys_addr |= (u64)phys_addr_highbits << 32; + } + + put_cpu(); +} + +/* Free all buffers from the pool */ +static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, int buf_num) +{ + int i; + + if (buf_num > bm_pool->buf_num) { + WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n", + bm_pool->id, buf_num); + buf_num = bm_pool->buf_num; + } + + for (i = 0; i < buf_num; i++) { + dma_addr_t buf_dma_addr; + phys_addr_t buf_phys_addr; + void *data; + + mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, + &buf_dma_addr, &buf_phys_addr); + + dma_unmap_single(dev, buf_dma_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + + data = (void *)phys_to_virt(buf_phys_addr); + if (!data) + break; + + mvpp2_frag_free(bm_pool, data); + } + + /* Update BM driver with number of buffers removed from pool */ + bm_pool->buf_num -= i; +} + +/* Check number of buffers in BM pool */ +static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) +{ + int buf_num = 0; + + buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) & + MVPP22_BM_POOL_PTRS_NUM_MASK; + buf_num += mvpp2_read(priv, 
MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) & + MVPP2_BM_BPPI_PTR_NUM_MASK; + + /* HW has one buffer ready which is not reflected in the counters */ + if (buf_num) + buf_num += 1; + + return buf_num; +} + +/* Cleanup pool */ +static int mvpp2_bm_pool_destroy(struct platform_device *pdev, + struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool) +{ + int buf_num; + u32 val; + + buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); + mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num); + + /* Check buffer counters after free */ + buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); + if (buf_num) { + WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n", + bm_pool->id, bm_pool->buf_num); + return 0; + } + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); + val |= MVPP2_BM_STOP_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + + dma_free_coherent(&pdev->dev, bm_pool->size_bytes, + bm_pool->virt_addr, + bm_pool->dma_addr); + return 0; +} + +static int mvpp2_bm_pools_init(struct platform_device *pdev, + struct mvpp2 *priv) +{ + int i, err, size; + struct mvpp2_bm_pool *bm_pool; + + /* Create all pools with maximum size */ + size = MVPP2_BM_POOL_SIZE_MAX; + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + bm_pool = &priv->bm_pools[i]; + bm_pool->id = i; + err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size); + if (err) + goto err_unroll_pools; + mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); + } + return 0; + +err_unroll_pools: + dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); + for (i = i - 1; i >= 0; i--) + mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]); + return err; +} + +static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + int i, err; + + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + /* Mask BM all interrupts */ + mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); + /* Clear BM cause register */ + mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); + } + + /* Allocate and initialize BM pools */ + priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM, + sizeof(*priv->bm_pools), GFP_KERNEL); + if (!priv->bm_pools) + return -ENOMEM; + + err = mvpp2_bm_pools_init(pdev, priv); + if (err < 0) + return err; + return 0; +} + +static void mvpp2_setup_bm_pool(void) +{ + /* Short pool */ + mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; + mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; + + /* Long pool */ + mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; + mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; + + /* Jumbo pool */ + mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; + mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; +} + +/* Attach long pool to rxq */ +static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, + int lrxq, int long_pool) +{ + u32 val, mask; + int prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq]->id; + + if (port->priv->hw_version == MVPP21) + mask = MVPP21_RXQ_POOL_LONG_MASK; + else + mask = MVPP22_RXQ_POOL_LONG_MASK; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~mask; + val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Attach short pool to rxq */ +static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, + int lrxq, int short_pool) +{ + u32 val, mask; + int prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq]->id; + + if (port->priv->hw_version == MVPP21) + mask = 
MVPP21_RXQ_POOL_SHORT_MASK; + else + mask = MVPP22_RXQ_POOL_SHORT_MASK; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~mask; + val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +static void *mvpp2_buf_alloc(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, + dma_addr_t *buf_dma_addr, + phys_addr_t *buf_phys_addr, + gfp_t gfp_mask) +{ + dma_addr_t dma_addr; + void *data; + + data = mvpp2_frag_alloc(bm_pool); + if (!data) + return NULL; + + dma_addr = dma_map_single(port->dev->dev.parent, data, + MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { + mvpp2_frag_free(bm_pool, data); + return NULL; + } + *buf_dma_addr = dma_addr; + *buf_phys_addr = virt_to_phys(data); + + return data; +} + +/* Release buffer to BM */ +static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, + dma_addr_t buf_dma_addr, + phys_addr_t buf_phys_addr) +{ + int cpu = get_cpu(); + + if (port->priv->hw_version == MVPP22) { + u32 val = 0; + + if (sizeof(dma_addr_t) == 8) + val |= upper_32_bits(buf_dma_addr) & + MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; + + if (sizeof(phys_addr_t) == 8) + val |= (upper_32_bits(buf_phys_addr) + << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & + MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; + + mvpp2_percpu_write_relaxed(port->priv, cpu, + MVPP22_BM_ADDR_HIGH_RLS_REG, val); + } + + /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply + * returned in the "cookie" field of the RX + * descriptor. Instead of storing the virtual address, we + * store the physical address + */ + mvpp2_percpu_write_relaxed(port->priv, cpu, + MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); + mvpp2_percpu_write_relaxed(port->priv, cpu, + MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); + + put_cpu(); +} + +/* Allocate buffers for the pool */ +static int mvpp2_bm_bufs_add(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, int buf_num) +{ + int i, buf_size, total_size; + dma_addr_t dma_addr; + phys_addr_t phys_addr; + void *buf; + + buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); + total_size = MVPP2_RX_TOTAL_SIZE(buf_size); + + if (buf_num < 0 || + (buf_num + bm_pool->buf_num > bm_pool->size)) { + netdev_err(port->dev, + "cannot allocate %d buffers for pool %d\n", + buf_num, bm_pool->id); + return 0; + } + + for (i = 0; i < buf_num; i++) { + buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, + &phys_addr, GFP_KERNEL); + if (!buf) + break; + + mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, + phys_addr); + } + + /* Update BM driver with number of buffers added to pool */ + bm_pool->buf_num += i; + + netdev_dbg(port->dev, + "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", + bm_pool->id, bm_pool->pkt_size, buf_size, total_size); + + netdev_dbg(port->dev, + "pool %d: %d of %d buffers added\n", + bm_pool->id, i, buf_num); + return i; +} + +/* Notify the driver that BM pool is being used as specific type and return the + * pool pointer on success + */ +static struct mvpp2_bm_pool * +mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) +{ + struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; + int num; + + if (pool >= MVPP2_BM_POOLS_NUM) { + netdev_err(port->dev, "Invalid pool %d\n", pool); + return NULL; + } + + /* Allocate buffers in case BM pool is used as long pool, but packet + * size doesn't match MTU or BM pool hasn't being used yet + */ + if (new_pool->pkt_size == 0) { + int pkts_num; + + /* Set default buffer 
number or free all the buffers in case + * the pool is not empty + */ + pkts_num = new_pool->buf_num; + if (pkts_num == 0) + pkts_num = mvpp2_pools[pool].buf_num; + else + mvpp2_bm_bufs_free(port->dev->dev.parent, + port->priv, new_pool, pkts_num); + + new_pool->pkt_size = pkt_size; + new_pool->frag_size = + SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + + MVPP2_SKB_SHINFO_SIZE; + + /* Allocate buffers for this pool */ + num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); + if (num != pkts_num) { + WARN(1, "pool %d: %d of %d allocated\n", + new_pool->id, num, pkts_num); + return NULL; + } + } + + mvpp2_bm_pool_bufsize_set(port->priv, new_pool, + MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); + + return new_pool; +} + +/* Initialize pools for swf */ +static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) +{ + int rxq; + enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; + + /* If port pkt_size is higher than 1518B: + * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool + */ + if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { + long_log_pool = MVPP2_BM_JUMBO; + short_log_pool = MVPP2_BM_LONG; + } else { + long_log_pool = MVPP2_BM_LONG; + short_log_pool = MVPP2_BM_SHORT; + } + + if (!port->pool_long) { + port->pool_long = + mvpp2_bm_pool_use(port, long_log_pool, + mvpp2_pools[long_log_pool].pkt_size); + if (!port->pool_long) + return -ENOMEM; + + port->pool_long->port_map |= BIT(port->id); + + for (rxq = 0; rxq < port->nrxqs; rxq++) + mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); + } + + if (!port->pool_short) { + port->pool_short = + mvpp2_bm_pool_use(port, short_log_pool, + mvpp2_pools[short_log_pool].pkt_size); + if (!port->pool_short) + return -ENOMEM; + + port->pool_short->port_map |= BIT(port->id); + + for (rxq = 0; rxq < port->nrxqs; rxq++) + mvpp2_rxq_short_pool_set(port, rxq, + port->pool_short->id); + } + + return 0; +} + +static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) +{ + struct mvpp2_port *port = netdev_priv(dev); + enum mvpp2_bm_pool_log_num new_long_pool; + int pkt_size = MVPP2_RX_PKT_SIZE(mtu); + + /* If port MTU is higher than 1518B: + * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool + */ + if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) + new_long_pool = MVPP2_BM_JUMBO; + else + new_long_pool = MVPP2_BM_LONG; + + if (new_long_pool != port->pool_long->id) { + /* Remove port from old short & long pool */ + port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id, + port->pool_long->pkt_size); + port->pool_long->port_map &= ~BIT(port->id); + port->pool_long = NULL; + + port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id, + port->pool_short->pkt_size); + port->pool_short->port_map &= ~BIT(port->id); + port->pool_short = NULL; + + port->pkt_size = pkt_size; + + /* Add port to new short & long pool */ + mvpp2_swf_bm_pool_init(port); + + /* Update L4 checksum when jumbo enable/disable on port */ + if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + dev->hw_features &= ~(NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + } else { + dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + } + } + + dev->mtu = mtu; + dev->wanted_features = dev->features; + + netdev_update_features(dev); + return 0; +} + +static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) +{ + int i, 
sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); +} + +static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) +{ + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); +} + +/* Mask the current CPU's Rx/Tx interrupts + * Called by on_each_cpu(), guaranteed to run with migration disabled, + * using smp_processor_id() is OK. + */ +static void mvpp2_interrupts_mask(void *arg) +{ + struct mvpp2_port *port = arg; + + mvpp2_percpu_write(port->priv, smp_processor_id(), + MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); +} + +/* Unmask the current CPU's Rx/Tx interrupts. + * Called by on_each_cpu(), guaranteed to run with migration disabled, + * using smp_processor_id() is OK. + */ +static void mvpp2_interrupts_unmask(void *arg) +{ + struct mvpp2_port *port = arg; + u32 val; + + val = MVPP2_CAUSE_MISC_SUM_MASK | + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + if (port->has_tx_irqs) + val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + + mvpp2_percpu_write(port->priv, smp_processor_id(), + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); +} + +static void +mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) +{ + u32 val; + int i; + + if (port->priv->hw_version != MVPP22) + return; + + if (mask) + val = 0; + else + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *v = port->qvecs + i; + + if (v->type != MVPP2_QUEUE_VECTOR_SHARED) + continue; + + mvpp2_percpu_write(port->priv, v->sw_thread_id, + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); + } +} + +/* Port configuration routines */ + +static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII; + else if (port->gop_id == 3) + val |= GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); +} + +static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | + GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + if (port->gop_id > 1) { + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val &= ~GENCONF_CTRL0_PORT0_RGMII; + else if (port->gop_id == 3) + val &= ~GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); + } +} + +static 
void mvpp22_gop_init_10gkr(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); + void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); + u32 val; + + /* XPCS */ + val = readl(xpcs + MVPP22_XPCS_CFG0); + val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | + MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); + val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); + writel(val, xpcs + MVPP22_XPCS_CFG0); + + /* MPCS */ + val = readl(mpcs + MVPP22_MPCS_CTRL); + val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; + writel(val, mpcs + MVPP22_MPCS_CTRL); + + val = readl(mpcs + MVPP22_MPCS_CLK_RESET); + val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC | + MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); + val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); + + val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; + val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); +} + +static int mvpp22_gop_init(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + if (!priv->sysctrl_base) + return 0; + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (port->gop_id == 0) + goto invalid_conf; + mvpp22_gop_init_rgmii(port); + break; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + mvpp22_gop_init_sgmii(port); + break; + case PHY_INTERFACE_MODE_10GKR: + if (port->gop_id != 0) + goto invalid_conf; + mvpp22_gop_init_10gkr(port); + break; + default: + goto unsupported_conf; + } + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); + val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | + GENCONF_PORT_CTRL1_EN(port->gop_id); + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); + val |= GENCONF_SOFT_RESET1_GOP; + regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); + +unsupported_conf: + return 0; + +invalid_conf: + netdev_err(port->dev, "Invalid port configuration\n"); + return -EINVAL; +} + +static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) +{ + u32 val; + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + /* Enable the GMAC link status irq for this port */ + val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); + val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); + } + + if (port->gop_id == 0) { + /* Enable the XLG/GIG irqs for this port */ + val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); + if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) + val |= MVPP22_XLG_EXT_INT_MASK_XLG; + else + val |= MVPP22_XLG_EXT_INT_MASK_GIG; + writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); + } +} + +static void mvpp22_gop_mask_irq(struct mvpp2_port *port) +{ + u32 val; + + if (port->gop_id == 0) { + val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); + val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | + MVPP22_XLG_EXT_INT_MASK_GIG); + writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); + } + + if 
(phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); + val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); + } +} + +static void mvpp22_gop_setup_irq(struct mvpp2_port *port) +{ + u32 val; + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + val = readl(port->base + MVPP22_GMAC_INT_MASK); + val |= MVPP22_GMAC_INT_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_MASK); + } + + if (port->gop_id == 0) { + val = readl(port->base + MVPP22_XLG_INT_MASK); + val |= MVPP22_XLG_INT_MASK_LINK; + writel(val, port->base + MVPP22_XLG_INT_MASK); + } + + mvpp22_gop_unmask_irq(port); +} + +/* Sets the PHY mode of the COMPHY (which configures the serdes lanes). + * + * The PHY mode used by the PPv2 driver comes from the network subsystem, while + * the one given to the COMPHY comes from the generic PHY subsystem. Hence they + * differ. + * + * The COMPHY configures the serdes lanes regardless of the actual use of the + * lanes by the physical layer. This is why configurations like + * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid. + */ +static int mvpp22_comphy_init(struct mvpp2_port *port) +{ + enum phy_mode mode; + int ret; + + if (!port->comphy) + return 0; + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + mode = PHY_MODE_SGMII; + break; + case PHY_INTERFACE_MODE_2500BASEX: + mode = PHY_MODE_2500SGMII; + break; + case PHY_INTERFACE_MODE_10GKR: + mode = PHY_MODE_10GKR; + break; + default: + return -EINVAL; + } + + ret = phy_set_mode(port->comphy, mode); + if (ret) + return ret; + + return phy_power_on(port->comphy); +} + +static void mvpp2_port_enable(struct mvpp2_port *port) +{ + u32 val; + + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val |= MVPP22_XLG_CTRL0_PORT_EN | + MVPP22_XLG_CTRL0_MAC_RESET_DIS; + val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val |= MVPP2_GMAC_PORT_EN_MASK; + val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + } +} + +static void mvpp2_port_disable(struct mvpp2_port *port) +{ + u32 val; + + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val &= ~MVPP22_XLG_CTRL0_PORT_EN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + + /* Disable & reset should be done separately */ + val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~(MVPP2_GMAC_PORT_EN_MASK); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + } +} + +/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ +static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base 
+ MVPP2_GMAC_CTRL_1_REG) & + ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); +} + +/* Configure loopback port */ +static void mvpp2_port_loopback_set(struct mvpp2_port *port, + const struct phylink_link_state *state) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); + + if (state->speed == 1000) + val |= MVPP2_GMAC_GMII_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; + + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) + val |= MVPP2_GMAC_PCS_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; + + writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); +} + +struct mvpp2_ethtool_counter { + unsigned int offset; + const char string[ETH_GSTRING_LEN]; + bool reg_is_64b; +}; + +static u64 mvpp2_read_count(struct mvpp2_port *port, + const struct mvpp2_ethtool_counter *counter) +{ + u64 val; + + val = readl(port->stats_base + counter->offset); + if (counter->reg_is_64b) + val += (u64)readl(port->stats_base + counter->offset + 4) << 32; + + return val; +} + +/* Due to the fact that software statistics and hardware statistics are, by + * design, incremented at different moments in the chain of packet processing, + * it is very likely that incoming packets could have been dropped after being + * counted by hardware but before reaching software statistics (most probably + * multicast packets), and in the oppposite way, during transmission, FCS bytes + * are added in between as well as TSO skb will be split and header bytes added. + * Hence, statistics gathered from userspace with ifconfig (software) and + * ethtool (hardware) cannot be compared. + */ +static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = { + { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, + { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, + { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, + { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, + { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, + { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, + { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, + { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, + { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, + { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, + { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, + { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, + { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, + { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, + { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, + { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, + { MVPP2_MIB_FC_SENT, "fc_sent" }, + { MVPP2_MIB_FC_RCVD, "fc_received" }, + { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, + { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, + { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, + { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, + { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, + { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, + { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, + { MVPP2_MIB_COLLISION, "collision" }, + { MVPP2_MIB_LATE_COLLISION, "late_collision" }, +}; + +static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + if (sset == ETH_SS_STATS) { + int i; + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + 
memcpy(data + i * ETH_GSTRING_LEN, + &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); + } +} + +static void mvpp2_gather_hw_statistics(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, + stats_work); + u64 *pstats; + int i; + + mutex_lock(&port->gather_stats_lock); + + pstats = port->ethtool_stats; + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); + + /* No need to read again the counters right after this function if it + * was called asynchronously by the user (ie. use of ethtool). + */ + cancel_delayed_work(&port->stats_work); + queue_delayed_work(port->priv->stats_queue, &port->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + + mutex_unlock(&port->gather_stats_lock); +} + +static void mvpp2_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Update statistics for the given port, then take the lock to avoid + * concurrent accesses on the ethtool_stats structure during its copy. + */ + mvpp2_gather_hw_statistics(&port->stats_work.work); + + mutex_lock(&port->gather_stats_lock); + memcpy(data, port->ethtool_stats, + sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs)); + mutex_unlock(&port->gather_stats_lock); +} + +static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(mvpp2_ethtool_regs); + + return -EOPNOTSUPP; +} + +static void mvpp2_port_reset(struct mvpp2_port *port) +{ + u32 val; + unsigned int i; + + /* Read the GOP statistics to reset the hardware counters */ + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + ~MVPP2_GMAC_PORT_RESET_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + MVPP2_GMAC_PORT_RESET_MASK) + continue; +} + +/* Change maximum receive size of the port */ +static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; + val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP2_GMAC_MAX_RX_SIZE_OFFS); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); +} + +/* Change maximum receive size of the port */ +static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP22_XLG_CTRL1_REG); + val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; + val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; + writel(val, port->base + MVPP22_XLG_CTRL1_REG); +} + +/* Set defaults to the MVPP2 port */ +static void mvpp2_defaults_set(struct mvpp2_port *port) +{ + int tx_port_num, val, queue, ptxq, lrxq; + + if (port->priv->hw_version == MVPP21) { + /* Update TX FIFO MIN Threshold */ + val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; + /* Min. 
TX threshold must be less than minimal packet length */ + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); + writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + } + + /* Disable Legacy WRR, Disable EJP, Release from reset */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, + tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); + + /* Close bandwidth for all queues */ + for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { + ptxq = mvpp2_txq_phys(port->id, queue); + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); + } + + /* Set refill period to 1 usec, refill tokens + * and bucket size to maximum + */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, + port->priv->tclk / USEC_PER_SEC); + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); + val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); + val = MVPP2_TXP_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + + /* Set MaximumLowLatencyPacketSize value to 256 */ + mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), + MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | + MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); + + /* Enable Rx cache snoop */ + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_SNOOP_PKT_SIZE_MASK | + MVPP2_SNOOP_BUF_HDR_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } + + /* At default, mask all interrupts to all present cpus */ + mvpp2_interrupts_disable(port); +} + +/* Enable/disable receiving packets */ +static void mvpp2_ingress_enable(struct mvpp2_port *port) +{ + u32 val; + int lrxq, queue; + + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val &= ~MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +static void mvpp2_ingress_disable(struct mvpp2_port *port) +{ + u32 val; + int lrxq, queue; + + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +/* Enable transmit via physical egress queue + * - HW starts take descriptors from DRAM + */ +static void mvpp2_egress_enable(struct mvpp2_port *port) +{ + u32 qmap; + int queue; + int tx_port_num = mvpp2_egress_port(port); + + /* Enable all initialized TXs. 
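+ * Only queues whose descriptor array was allocated (txq->descs is + * non-NULL) contribute a bit to the qmap value written below. 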
*/ + qmap = 0; + for (queue = 0; queue < port->ntxqs; queue++) { + struct mvpp2_tx_queue *txq = port->txqs[queue]; + + if (txq->descs) + qmap |= (1 << queue); + } + + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); +} + +/* Disable transmit via physical egress queue + * - HW doesn't take descriptors from DRAM + */ +static void mvpp2_egress_disable(struct mvpp2_port *port) +{ + u32 reg_data; + int delay; + int tx_port_num = mvpp2_egress_port(port); + + /* Issue stop command for active channels only */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & + MVPP2_TXP_SCHED_ENQ_MASK; + if (reg_data != 0) + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, + (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); + + /* Wait for all Tx activity to terminate. */ + delay = 0; + do { + if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { + netdev_warn(port->dev, + "Tx stop timed out, status=0x%08x\n", + reg_data); + break; + } + mdelay(1); + delay++; + + /* Check port TX Command register that all + * Tx queues are stopped + */ + reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); + } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); +} + +/* Rx descriptors helper methods */ + +/* Get number of Rx descriptors occupied by received packets */ +static inline int +mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) +{ + u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); + + return val & MVPP2_RXQ_OCCUPIED_MASK; +} + +/* Update Rx queue status with the number of occupied and available + * Rx descriptor slots. + */ +static inline void +mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, + int used_count, int free_count) +{ + /* Decrement the number of used descriptors and increment the + * number of free descriptors. + */ + u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); + + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); +} + +/* Get pointer to next RX descriptor to be processed by SW */ +static inline struct mvpp2_rx_desc * +mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) +{ + int rx_desc = rxq->next_desc_to_proc; + + rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); + prefetch(rxq->descs + rxq->next_desc_to_proc); + return rxq->descs + rx_desc; +} + +/* Set rx queue offset */ +static void mvpp2_rxq_offset_set(struct mvpp2_port *port, + int prxq, int offset) +{ + u32 val; + + /* Convert offset from bytes to units of 32 bytes */ + offset = offset >> 5; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; + + /* Offset is in units of 32 bytes */ + val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & + MVPP2_RXQ_PACKET_OFFSET_MASK); + + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Tx descriptors helper methods */ + +/* Get pointer to next Tx descriptor to be processed (sent) by HW */ +static struct mvpp2_tx_desc * +mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) +{ + int tx_desc = txq->next_desc_to_proc; + + txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); + return txq->descs + tx_desc; +} + +/* Update HW with number of aggregated Tx descriptors to be sent + * + * Called only from mvpp2_tx(), so migration is disabled, using + * smp_processor_id() is OK. 
+ */ +static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) +{ + /* aggregated access - relevant TXQ number is written in TX desc */ + mvpp2_percpu_write(port->priv, smp_processor_id(), + MVPP2_AGGR_TXQ_UPDATE_REG, pending); +} + +/* Check if there are enough free descriptors in aggregated txq. + * If not, update the number of occupied descriptors and repeat the check. + * + * Called only from mvpp2_tx(), so migration is disabled, using + * smp_processor_id() is OK. + */ +static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, + struct mvpp2_tx_queue *aggr_txq, int num) +{ + if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { + /* Update number of occupied aggregated Tx descriptors */ + int cpu = smp_processor_id(); + u32 val = mvpp2_read_relaxed(priv, + MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + + aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; + + if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) + return -ENOMEM; + } + return 0; +} + +/* Reserved Tx descriptors allocation request + * + * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called + * only by mvpp2_tx(), so migration is disabled, using + * smp_processor_id() is OK. + */ +static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, + struct mvpp2_tx_queue *txq, int num) +{ + u32 val; + int cpu = smp_processor_id(); + + val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; + mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); + + val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); + + return val & MVPP2_TXQ_RSVD_RSLT_MASK; +} + +/* Check if there are enough reserved descriptors for transmission. + * If not, request chunk of reserved descriptors and check again. + */ +static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, + struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int num) +{ + int req, cpu, desc_count; + + if (txq_pcpu->reserved_num >= num) + return 0; + + /* Not enough descriptors reserved! Update the reserved descriptor + * count and check again. + */ + + desc_count = 0; + /* Compute total of used descriptors */ + for_each_present_cpu(cpu) { + struct mvpp2_txq_pcpu *txq_pcpu_aux; + + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); + desc_count += txq_pcpu_aux->count; + desc_count += txq_pcpu_aux->reserved_num; + } + + req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); + desc_count += req; + + if (desc_count > + (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) + return -ENOMEM; + + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); + + /* OK, the descriptor could have been updated: check again. */ + if (txq_pcpu->reserved_num < num) + return -ENOMEM; + return 0; +} + +/* Release the last allocated Tx descriptor. Useful to handle DMA + * mapping failures in the Tx path. 
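+ * This just rewinds next_desc_to_proc by one position, wrapping towards + * the end of the ring when the index is already zero. 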
+ */ +static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) +{ + if (txq->next_desc_to_proc == 0) + txq->next_desc_to_proc = txq->last_desc - 1; + else + txq->next_desc_to_proc--; +} + +/* Set Tx descriptors fields relevant for CSUM calculation */ +static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, + int ip_hdr_len, int l4_proto) +{ + u32 command; + + /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, + * G_L4_chk, L4_type required only for checksum calculation + */ + command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); + command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); + command |= MVPP2_TXD_IP_CSUM_DISABLE; + + if (l3_proto == swab16(ETH_P_IP)) { + command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ + command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ + } else { + command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ + } + + if (l4_proto == IPPROTO_TCP) { + command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else if (l4_proto == IPPROTO_UDP) { + command |= MVPP2_TXD_L4_UDP; /* enable UDP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else { + command |= MVPP2_TXD_L4_CSUM_NOT; + } + + return command; +} + +/* Get number of sent descriptors and decrement counter. + * The number of sent descriptors is returned. + * Per-CPU access + * + * Called only from mvpp2_txq_done(), called from mvpp2_tx() + * (migration disabled) and from the TX completion tasklet (migration + * disabled) so using smp_processor_id() is OK. + */ +static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + u32 val; + + /* Reading status reg resets transmitted descriptor counter */ + val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), + MVPP2_TXQ_SENT_REG(txq->id)); + + return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> + MVPP2_TRANSMITTED_COUNT_OFFSET; +} + +/* Called through on_each_cpu(), so runs on all CPUs, with migration + * disabled, therefore using smp_processor_id() is OK. 
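+ * + * Reading MVPP2_TXQ_SENT_REG has the side effect of clearing the per-CPU + * sent-descriptor counter (see mvpp2_txq_sent_desc_proc()), so a plain + * read per queue is all that is needed here. 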
+ */ +static void mvpp2_txq_sent_counter_clear(void *arg) +{ + struct mvpp2_port *port = arg; + int queue; + + for (queue = 0; queue < port->ntxqs; queue++) { + int id = port->txqs[queue]->id; + + mvpp2_percpu_read(port->priv, smp_processor_id(), + MVPP2_TXQ_SENT_REG(id)); + } +} + +/* Set max sizes for Tx queues */ +static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) +{ + u32 val, size, mtu; + int txq, tx_port_num; + + mtu = port->pkt_size * 8; + if (mtu > MVPP2_TXP_MTU_MAX) + mtu = MVPP2_TXP_MTU_MAX; + + /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ + mtu = 3 * mtu; + + /* Indirect access to registers */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + /* Set MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); + val &= ~MVPP2_TXP_MTU_MAX; + val |= mtu; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); + + /* TXP token size and all TXQs token size must be larger that MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); + size = val & MVPP2_TXP_TOKEN_SIZE_MAX; + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + } + + for (txq = 0; txq < port->ntxqs; txq++) { + val = mvpp2_read(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); + size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; + + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), + val); + } + } +} + +/* Set the number of packets that will be received before Rx interrupt + * will be generated by HW. + */ +static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + int cpu = get_cpu(); + + if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) + rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; + + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, + rxq->pkts_coal); + + put_cpu(); +} + +/* For some reason in the LSP this is done on each CPU. Why ? */ +static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + int cpu = get_cpu(); + u32 val; + + if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) + txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; + + val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + + put_cpu(); +} + +static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) +{ + u64 tmp = (u64)clk_hz * usec; + + do_div(tmp, USEC_PER_SEC); + + return tmp > U32_MAX ? U32_MAX : tmp; +} + +static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) +{ + u64 tmp = (u64)cycles * USEC_PER_SEC; + + do_div(tmp, clk_hz); + + return tmp > U32_MAX ? 
U32_MAX : tmp; +} + +/* Set the time delay in usec before Rx interrupt */ +static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); + + if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { + rxq->time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(rxq->time_coal, freq); + } + + mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); +} + +static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) +{ + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + + if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { + port->tx_time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + } + + mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); +} + +/* Free Tx queue skbuffs */ +static void mvpp2_txq_bufs_free(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu, int num) +{ + int i; + + for (i = 0; i < num; i++) { + struct mvpp2_txq_pcpu_buf *tx_buf = + txq_pcpu->buffs + txq_pcpu->txq_get_index; + + if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) + dma_unmap_single(port->dev->dev.parent, tx_buf->dma, + tx_buf->size, DMA_TO_DEVICE); + if (tx_buf->skb) + dev_kfree_skb_any(tx_buf->skb); + + mvpp2_txq_inc_get(txq_pcpu); + } +} + +static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, + u32 cause) +{ + int queue = fls(cause) - 1; + + return port->rxqs[queue]; +} + +static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, + u32 cause) +{ + int queue = fls(cause) - 1; + + return port->txqs[queue]; +} + +/* Handle end of transmission */ +static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu) +{ + struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); + int tx_done; + + if (txq_pcpu->cpu != smp_processor_id()) + netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); + + tx_done = mvpp2_txq_sent_desc_proc(port, txq); + if (!tx_done) + return; + mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); + + txq_pcpu->count -= tx_done; + + if (netif_tx_queue_stopped(nq)) + if (txq_pcpu->count <= txq_pcpu->wake_threshold) + netif_tx_wake_queue(nq); +} + +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, + int cpu) +{ + struct mvpp2_tx_queue *txq; + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int tx_todo = 0; + + while (cause) { + txq = mvpp2_get_tx_queue(port, cause); + if (!txq) + break; + + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + + if (txq_pcpu->count) { + mvpp2_txq_done(port, txq, txq_pcpu); + tx_todo += txq_pcpu->count; + } + + cause &= ~(1 << txq->log_id); + } + return tx_todo; +} + +/* Rx/Tx queue initialization/cleanup methods */ + +/* Allocate and initialize descriptors for aggr TXQ */ +static int mvpp2_aggr_txq_init(struct platform_device *pdev, + struct mvpp2_tx_queue *aggr_txq, int cpu, + struct mvpp2 *priv) +{ + u32 txq_dma; + + /* Allocate memory for TX descriptors */ + aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + &aggr_txq->descs_dma, GFP_KERNEL); + if (!aggr_txq->descs) + return -ENOMEM; + + aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; + + /* Aggr TXQ no reset WA */ + 
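/* That is, resynchronize the software index with whatever the hardware + * currently reports in MVPP2_AGGR_TXQ_INDEX_REG, instead of assuming the + * ring starts at descriptor 0. + */ + 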
aggr_txq->next_desc_to_proc = mvpp2_read(priv, + MVPP2_AGGR_TXQ_INDEX_REG(cpu)); + + /* Set Tx descriptors queue starting address indirect + * access + */ + if (priv->hw_version == MVPP21) + txq_dma = aggr_txq->descs_dma; + else + txq_dma = aggr_txq->descs_dma >> + MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; + + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), + MVPP2_AGGR_TXQ_SIZE); + + return 0; +} + +/* Create a specified Rx queue */ +static int mvpp2_rxq_init(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) + +{ + u32 rxq_dma; + int cpu; + + rxq->size = port->rx_ring_size; + + /* Allocate memory for RX descriptors */ + rxq->descs = dma_alloc_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + &rxq->descs_dma, GFP_KERNEL); + if (!rxq->descs) + return -ENOMEM; + + rxq->last_desc = rxq->size - 1; + + /* Zero occupied and non-occupied counters - direct access */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + + /* Set Rx descriptors queue starting address - indirect access */ + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + if (port->priv->hw_version == MVPP21) + rxq_dma = rxq->descs_dma; + else + rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + put_cpu(); + + /* Set Offset */ + mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); + + /* Set coalescing pkts and time */ + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); + + /* Add number of descriptors ready for receiving packets */ + mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); + + return 0; +} + +/* Push packets received by the RXQ to BM pool */ +static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + int rx_received, i; + + rx_received = mvpp2_rxq_received(port, rxq->id); + if (!rx_received) + return; + + for (i = 0; i < rx_received; i++) { + struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + int pool; + + pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + + mvpp2_bm_pool_put(port, pool, + mvpp2_rxdesc_dma_addr_get(port, rx_desc), + mvpp2_rxdesc_cookie_get(port, rx_desc)); + } + mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); +} + +/* Cleanup Rx queue */ +static void mvpp2_rxq_deinit(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + int cpu; + + mvpp2_rxq_drop_pkts(port, rxq); + + if (rxq->descs) + dma_free_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + rxq->descs, + rxq->descs_dma); + + rxq->descs = NULL; + rxq->last_desc = 0; + rxq->next_desc_to_proc = 0; + rxq->descs_dma = 0; + + /* Clear Rx descriptors queue starting address and size; + * free descriptor number + */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + put_cpu(); +} + +/* Create and initialize a Tx queue */ +static int mvpp2_txq_init(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + u32 val; + int cpu, desc, desc_per_txq, tx_port_num; + struct mvpp2_txq_pcpu *txq_pcpu; + + 
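/* Bringing the queue up is done in three steps: allocate the descriptor + * ring, program the per-CPU indirect registers and the WRR/EJP scheduler, + * then set up the per-CPU bookkeeping, including the coherent buffer that + * holds the TSO headers. + */ + 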
txq->size = port->tx_ring_size; + + /* Allocate memory for Tx descriptors */ + txq->descs = dma_alloc_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + &txq->descs_dma, GFP_KERNEL); + if (!txq->descs) + return -ENOMEM; + + txq->last_desc = txq->size - 1; + + /* Set Tx descriptors queue starting address - indirect access */ + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, + txq->descs_dma); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, + txq->size & MVPP2_TXQ_DESC_SIZE_MASK); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, + txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); + val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); + val &= ~MVPP2_TXQ_PENDING_MASK; + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); + + /* Calculate base address in prefetch buffer. We reserve 16 descriptors + * for each existing TXQ. + * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT + * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS + */ + desc_per_txq = 16; + desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + + (txq->log_id * desc_per_txq); + + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, + MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | + MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); + put_cpu(); + + /* WRR / EJP configuration - indirect access */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); + val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); + + val = MVPP2_TXQ_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), + val); + + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu->size = txq->size; + txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, + sizeof(*txq_pcpu->buffs), + GFP_KERNEL); + if (!txq_pcpu->buffs) + return -ENOMEM; + + txq_pcpu->count = 0; + txq_pcpu->reserved_num = 0; + txq_pcpu->txq_put_index = 0; + txq_pcpu->txq_get_index = 0; + txq_pcpu->tso_headers = NULL; + + txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; + txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; + + txq_pcpu->tso_headers = + dma_alloc_coherent(port->dev->dev.parent, + txq_pcpu->size * TSO_HEADER_SIZE, + &txq_pcpu->tso_headers_dma, + GFP_KERNEL); + if (!txq_pcpu->tso_headers) + return -ENOMEM; + } + + return 0; +} + +/* Free allocated TXQ resources */ +static void mvpp2_txq_deinit(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu; + int cpu; + + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + kfree(txq_pcpu->buffs); + + if (txq_pcpu->tso_headers) + dma_free_coherent(port->dev->dev.parent, + txq_pcpu->size * TSO_HEADER_SIZE, + txq_pcpu->tso_headers, + txq_pcpu->tso_headers_dma); + + txq_pcpu->tso_headers = NULL; + } + + if (txq->descs) + dma_free_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_dma); + + txq->descs = NULL; + txq->last_desc = 0; + txq->next_desc_to_proc = 0; + txq->descs_dma = 0; + + /* Set minimum bandwidth for disabled TXQs */ + mvpp2_write(port->priv, 
MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); + + /* Set Tx descriptors queue starting address and size */ + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + put_cpu(); +} + +/* Cleanup Tx ports */ +static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu; + int delay, pending, cpu; + u32 val; + + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); + val |= MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + + /* The napi queue has been stopped so wait for all packets + * to be transmitted. + */ + delay = 0; + do { + if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { + netdev_warn(port->dev, + "port %d: cleaning queue %d timed out\n", + port->id, txq->log_id); + break; + } + mdelay(1); + delay++; + + pending = mvpp2_percpu_read(port->priv, cpu, + MVPP2_TXQ_PENDING_REG); + pending &= MVPP2_TXQ_PENDING_MASK; + } while (pending); + + val &= ~MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + put_cpu(); + + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + + /* Release all packets */ + mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); + + /* Reset queue */ + txq_pcpu->count = 0; + txq_pcpu->txq_put_index = 0; + txq_pcpu->txq_get_index = 0; + } +} + +/* Cleanup all Tx queues */ +static void mvpp2_cleanup_txqs(struct mvpp2_port *port) +{ + struct mvpp2_tx_queue *txq; + int queue; + u32 val; + + val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); + + /* Reset Tx ports and delete Tx queues */ + val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); + mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); + + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + mvpp2_txq_clean(port, txq); + mvpp2_txq_deinit(port, txq); + } + + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); + + val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); + mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); +} + +/* Cleanup all Rx queues */ +static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) +{ + int queue; + + for (queue = 0; queue < port->nrxqs; queue++) + mvpp2_rxq_deinit(port, port->rxqs[queue]); +} + +/* Init all Rx queues for port */ +static int mvpp2_setup_rxqs(struct mvpp2_port *port) +{ + int queue, err; + + for (queue = 0; queue < port->nrxqs; queue++) { + err = mvpp2_rxq_init(port, port->rxqs[queue]); + if (err) + goto err_cleanup; + } + return 0; + +err_cleanup: + mvpp2_cleanup_rxqs(port); + return err; +} + +/* Init all tx queues for port */ +static int mvpp2_setup_txqs(struct mvpp2_port *port) +{ + struct mvpp2_tx_queue *txq; + int queue, err; + + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + err = mvpp2_txq_init(port, txq); + if (err) + goto err_cleanup; + } + + if (port->has_tx_irqs) { + mvpp2_tx_time_coal_set(port); + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + mvpp2_tx_pkts_coal_set(port, txq); + } + } + + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); + return 0; + +err_cleanup: + mvpp2_cleanup_txqs(port); + return err; +} + +/* The callback for per-port interrupt */ +static irqreturn_t mvpp2_isr(int irq, void *dev_id) +{ + struct mvpp2_queue_vector *qv = dev_id; + + 
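/* Classic NAPI hand-off: mask this queue vector's interrupt and defer the + * work to the poll routine; mvpp2_poll() re-enables the interrupt once it + * completes within budget. + */ + 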
mvpp2_qvec_interrupt_disable(qv); + + napi_schedule(&qv->napi); + + return IRQ_HANDLED; +} + +/* Per-port interrupt for link status changes */ +static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) +{ + struct mvpp2_port *port = (struct mvpp2_port *)dev_id; + struct net_device *dev = port->dev; + bool event = false, link = false; + u32 val; + + mvpp22_gop_mask_irq(port); + + if (port->gop_id == 0 && + port->phy_interface == PHY_INTERFACE_MODE_10GKR) { + val = readl(port->base + MVPP22_XLG_INT_STAT); + if (val & MVPP22_XLG_INT_STAT_LINK) { + event = true; + val = readl(port->base + MVPP22_XLG_STATUS); + if (val & MVPP22_XLG_STATUS_LINK_UP) + link = true; + } + } else if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + val = readl(port->base + MVPP22_GMAC_INT_STAT); + if (val & MVPP22_GMAC_INT_STAT_LINK) { + event = true; + val = readl(port->base + MVPP2_GMAC_STATUS0); + if (val & MVPP2_GMAC_STATUS0_LINK_UP) + link = true; + } + } + + if (port->phylink) { + phylink_mac_change(port->phylink, link); + goto handled; + } + + if (!netif_running(dev) || !event) + goto handled; + + if (link) { + mvpp2_interrupts_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } else { + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + mvpp2_ingress_disable(port); + mvpp2_egress_disable(port); + + mvpp2_interrupts_disable(port); + } + +handled: + mvpp22_gop_unmask_irq(port); + return IRQ_HANDLED; +} + +static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) +{ + ktime_t interval; + + if (!port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; + hrtimer_start(&port_pcpu->tx_done_timer, interval, + HRTIMER_MODE_REL_PINNED); + } +} + +static void mvpp2_tx_proc_cb(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + unsigned int tx_todo, cause; + + if (!netif_running(dev)) + return; + port_pcpu->timer_scheduled = false; + + /* Process all the Tx queues */ + cause = (1 << port->ntxqs) - 1; + tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); + + /* Set the timer in case not all the packets were processed */ + if (tx_todo) + mvpp2_timer_set(port_pcpu); +} + +static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) +{ + struct mvpp2_port_pcpu *port_pcpu = container_of(timer, + struct mvpp2_port_pcpu, + tx_done_timer); + + tasklet_schedule(&port_pcpu->tx_done_tasklet); + + return HRTIMER_NORESTART; +} + +/* Main RX/TX processing routines */ + +/* Display more error info */ +static void mvpp2_rx_error(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); + char *err_str = NULL; + + switch (status & MVPP2_RXD_ERR_CODE_MASK) { + case MVPP2_RXD_ERR_CRC: + err_str = "crc"; + break; + case MVPP2_RXD_ERR_OVERRUN: + err_str = "overrun"; + break; + case MVPP2_RXD_ERR_RESOURCE: + err_str = "resource"; + break; + } + if (err_str && net_ratelimit()) + netdev_err(port->dev, + "bad rx status %08x (%s error), size=%zu\n", + status, err_str, sz); +} + +/* Handle RX checksum offload */ +static void mvpp2_rx_csum(struct mvpp2_port *port, 
u32 status, + struct sk_buff *skb) +{ + if (((status & MVPP2_RXD_L3_IP4) && + !(status & MVPP2_RXD_IP4_HEADER_ERR)) || + (status & MVPP2_RXD_L3_IP6)) + if (((status & MVPP2_RXD_L4_UDP) || + (status & MVPP2_RXD_L4_TCP)) && + (status & MVPP2_RXD_L4_CSUM_OK)) { + skb->csum = 0; + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + + skb->ip_summed = CHECKSUM_NONE; +} + +/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ +static int mvpp2_rx_refill(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, int pool) +{ + dma_addr_t dma_addr; + phys_addr_t phys_addr; + void *buf; + + /* No recycle or too many buffers are in use, so allocate a new skb */ + buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, + GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); + + return 0; +} + +/* Handle tx checksum */ +static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int ip_hdr_len = 0; + u8 l4_proto; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip4h = ip_hdr(skb); + + /* Calculate IPv4 checksum and L4 checksum */ + ip_hdr_len = ip4h->ihl; + l4_proto = ip4h->protocol; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + /* Read l4_protocol from one of IPv6 extra headers */ + if (skb_network_header_len(skb) > 0) + ip_hdr_len = (skb_network_header_len(skb) >> 2); + l4_proto = ip6h->nexthdr; + } else { + return MVPP2_TXD_L4_CSUM_NOT; + } + + return mvpp2_txq_desc_csum(skb_network_offset(skb), + skb->protocol, ip_hdr_len, l4_proto); + } + + return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; +} + +/* Main rx processing */ +static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, + int rx_todo, struct mvpp2_rx_queue *rxq) +{ + struct net_device *dev = port->dev; + int rx_received; + int rx_done = 0; + u32 rcvd_pkts = 0; + u32 rcvd_bytes = 0; + + /* Get number of received packets and clamp the to-do */ + rx_received = mvpp2_rxq_received(port, rxq->id); + if (rx_todo > rx_received) + rx_todo = rx_received; + + while (rx_done < rx_todo) { + struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + struct mvpp2_bm_pool *bm_pool; + struct sk_buff *skb; + unsigned int frag_size; + dma_addr_t dma_addr; + phys_addr_t phys_addr; + u32 rx_status; + int pool, rx_bytes, err; + void *data; + + rx_done++; + rx_status = mvpp2_rxdesc_status_get(port, rx_desc); + rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); + rx_bytes -= MVPP2_MH_SIZE; + dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); + phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); + data = (void *)phys_to_virt(phys_addr); + + pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + bm_pool = &port->priv->bm_pools[pool]; + + /* In case of an error, release the requested buffer pointer + * to the Buffer Manager. This request process is controlled + * by the hardware, and the information about the buffer is + * comprised by the RX descriptor. 
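+ * The BM pool id, the DMA address and the virtual address cookie used + * for the release are all taken from the descriptor fields read above. 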
+ */ + if (rx_status & MVPP2_RXD_ERR_SUMMARY) { +err_drop_frame: + dev->stats.rx_errors++; + mvpp2_rx_error(port, rx_desc); + /* Return the buffer to the pool */ + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); + continue; + } + + if (bm_pool->frag_size > PAGE_SIZE) + frag_size = 0; + else + frag_size = bm_pool->frag_size; + + skb = build_skb(data, frag_size); + if (!skb) { + netdev_warn(port->dev, "skb build failed\n"); + goto err_drop_frame; + } + + err = mvpp2_rx_refill(port, bm_pool, pool); + if (err) { + netdev_err(port->dev, "failed to refill BM pools\n"); + goto err_drop_frame; + } + + dma_unmap_single(dev->dev.parent, dma_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); + skb_put(skb, rx_bytes); + skb->protocol = eth_type_trans(skb, dev); + mvpp2_rx_csum(port, rx_status, skb); + + napi_gro_receive(napi, skb); + } + + if (rcvd_pkts) { + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_packets += rcvd_pkts; + stats->rx_bytes += rcvd_bytes; + u64_stats_update_end(&stats->syncp); + } + + /* Update Rx queue management counters */ + wmb(); + mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); + + return rx_todo; +} + +static inline void +tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, + struct mvpp2_tx_desc *desc) +{ + struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + + dma_addr_t buf_dma_addr = + mvpp2_txdesc_dma_addr_get(port, desc); + size_t buf_sz = + mvpp2_txdesc_size_get(port, desc); + if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) + dma_unmap_single(port->dev->dev.parent, buf_dma_addr, + buf_sz, DMA_TO_DEVICE); + mvpp2_txq_desc_put(txq); +} + +/* Handle tx fragmentation processing */ +static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + struct mvpp2_tx_desc *tx_desc; + int i; + dma_addr_t buf_dma_addr; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *addr = page_address(frag->page.p) + frag->page_offset; + + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, frag->size); + + buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, + frag->size, DMA_TO_DEVICE); + if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { + mvpp2_txq_desc_put(txq); + goto cleanup; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); + + if (i == (skb_shinfo(skb)->nr_frags - 1)) { + /* Last descriptor */ + mvpp2_txdesc_cmd_set(port, tx_desc, + MVPP2_TXD_L_DESC); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + } else { + /* Descriptor in the middle: Not First, Not Last */ + mvpp2_txdesc_cmd_set(port, tx_desc, 0); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + } + } + + return 0; +cleanup: + /* Release all descriptors that were used to map fragments of + * this packet, as well as the corresponding DMA mappings + */ + for (i = i - 1; i >= 0; i--) { + tx_desc = txq->descs + i; + tx_desc_unmap_put(port, txq, tx_desc); + } + + return -ENOMEM; +} + +static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, + struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int hdr_sz) +{ + struct mvpp2_port *port = netdev_priv(dev); 
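+ /* The TSO header itself has already been written by tso_build_hdr() into + * the per-CPU tso_headers coherent buffer, so the descriptor below only + * has to point at the matching TSO_HEADER_SIZE slot; no per-packet DMA + * mapping is needed for the header. */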
+ struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t addr; + + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); + + addr = txq_pcpu->tso_headers_dma + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); + + mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | + MVPP2_TXD_F_DESC | + MVPP2_TXD_PADDING_DISABLE); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); +} + +static inline int mvpp2_tso_put_data(struct sk_buff *skb, + struct net_device *dev, struct tso_t *tso, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int sz, bool left, bool last) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t buf_dma_addr; + + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, sz); + + buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { + mvpp2_txq_desc_put(txq); + return -ENOMEM; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); + + if (!left) { + mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); + if (last) { + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + return 0; + } + } else { + mvpp2_txdesc_cmd_set(port, tx_desc, 0); + } + + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + return 0; +} + +static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct tso_t tso; + int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); + int i, len, descs = 0; + + /* Check number of available descriptors */ + if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, + tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, + tso_count_descs(skb))) + return 0; + + tso_start(skb, &tso); + len = skb->len - hdr_sz; + while (len > 0) { + int left = min_t(int, skb_shinfo(skb)->gso_size, len); + char *hdr = txq_pcpu->tso_headers + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + + len -= left; + descs++; + + tso_build_hdr(skb, hdr, &tso, left, len == 0); + mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); + + while (left > 0) { + int sz = min_t(int, tso.size, left); + left -= sz; + descs++; + + if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, + txq_pcpu, sz, left, len == 0)) + goto release; + tso_build_data(skb, &tso, sz); + } + } + + return descs; + +release: + for (i = descs - 1; i >= 0; i--) { + struct mvpp2_tx_desc *tx_desc = txq->descs + i; + tx_desc_unmap_put(port, txq, tx_desc); + } + return 0; +} + +/* Main tx processing */ +static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_queue *txq, *aggr_txq; + struct mvpp2_txq_pcpu *txq_pcpu; + struct mvpp2_tx_desc *tx_desc; + dma_addr_t buf_dma_addr; + int frags = 0; + u16 txq_id; + u32 tx_cmd; + + txq_id = skb_get_queue_mapping(skb); + txq = port->txqs[txq_id]; + txq_pcpu = this_cpu_ptr(txq->pcpu); + aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + + if (skb_is_gso(skb)) { + frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); + goto out; + } + frags = skb_shinfo(skb)->nr_frags + 1; + + /* Check number of available descriptors */ + if 
(mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || + mvpp2_txq_reserved_desc_num_proc(port->priv, txq, + txq_pcpu, frags)) { + frags = 0; + goto out; + } + + /* Get a descriptor for the first part of the packet */ + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); + + buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { + mvpp2_txq_desc_put(txq); + frags = 0; + goto out; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); + + tx_cmd = mvpp2_skb_tx_csum(port, skb); + + if (frags == 1) { + /* First and Last descriptor */ + tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + } else { + /* First but not Last */ + tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + + /* Continue with other skb fragments */ + if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { + tx_desc_unmap_put(port, txq, tx_desc); + frags = 0; + } + } + +out: + if (frags > 0) { + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); + + txq_pcpu->reserved_num -= frags; + txq_pcpu->count += frags; + aggr_txq->count += frags; + + /* Enable transmit */ + wmb(); + mvpp2_aggr_txq_pend_desc_add(port, frags); + + if (txq_pcpu->count >= txq_pcpu->stop_threshold) + netif_tx_stop_queue(nq); + + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + } else { + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + } + + /* Finalize TX processing */ + if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) + mvpp2_txq_done(port, txq, txq_pcpu); + + /* Set the timer in case not all frags were processed */ + if (!port->has_tx_irqs && txq_pcpu->count <= frags && + txq_pcpu->count > 0) { + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + + mvpp2_timer_set(port_pcpu); + } + + return NETDEV_TX_OK; +} + +static inline void mvpp2_cause_error(struct net_device *dev, int cause) +{ + if (cause & MVPP2_CAUSE_FCS_ERR_MASK) + netdev_err(dev, "FCS error\n"); + if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) + netdev_err(dev, "rx fifo overrun error\n"); + if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) + netdev_err(dev, "tx fifo underrun error\n"); +} + +static int mvpp2_poll(struct napi_struct *napi, int budget) +{ + u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; + int rx_done = 0; + struct mvpp2_port *port = netdev_priv(napi->dev); + struct mvpp2_queue_vector *qv; + int cpu = smp_processor_id(); + + qv = container_of(napi, struct mvpp2_queue_vector, napi); + + /* Rx/Tx cause register + * + * Bits 0-15: each bit indicates received packets on the Rx queue + * (bit 0 is for Rx queue 0). + * + * Bits 16-23: each bit indicates transmitted packets on the Tx queue + * (bit 16 is for Tx queue 0). 
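+ * + * The bits covered by MVPP2_CAUSE_MISC_SUM_MASK flag miscellaneous events; + * they are handled first below and cleared by writing the cause register + * back with that mask removed. 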
+ * + * Each CPU has its own Rx/Tx cause register + */ + cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); + + cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; + if (cause_misc) { + mvpp2_cause_error(port->dev, cause_misc); + + /* Clear the cause register */ + mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); + mvpp2_percpu_write(port->priv, cpu, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id), + cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); + } + + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } + + /* Process RX packets */ + cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx <<= qv->first_rxq; + cause_rx |= qv->pending_cause_rx; + while (cause_rx && budget > 0) { + int count; + struct mvpp2_rx_queue *rxq; + + rxq = mvpp2_get_rx_queue(port, cause_rx); + if (!rxq) + break; + + count = mvpp2_rx(port, napi, budget, rxq); + rx_done += count; + budget -= count; + if (budget > 0) { + /* Clear the bit associated to this Rx queue + * so that next iteration will continue from + * the next Rx queue. + */ + cause_rx &= ~(1 << rxq->logic_rxq); + } + } + + if (budget > 0) { + cause_rx = 0; + napi_complete_done(napi, rx_done); + + mvpp2_qvec_interrupt_enable(qv); + } + qv->pending_cause_rx = cause_rx; + return rx_done; +} + +static void mvpp22_mode_reconfigure(struct mvpp2_port *port) +{ + u32 ctrl3; + + /* comphy reconfiguration */ + mvpp22_comphy_init(port); + + /* gop reconfiguration */ + mvpp22_gop_init(port); + + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0) { + ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); + ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + + if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR) + ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; + else + ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; + + writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); + } + + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) + mvpp2_xlg_max_rx_size_set(port); + else + mvpp2_gmac_max_rx_size_set(port); +} + +/* Set hw internals when starting port */ +static void mvpp2_start_dev(struct mvpp2_port *port) +{ + int i; + + mvpp2_txp_max_tx_size_set(port); + + for (i = 0; i < port->nqvecs; i++) + napi_enable(&port->qvecs[i].napi); + + /* Enable interrupts on all CPUs */ + mvpp2_interrupts_enable(port); + + if (port->priv->hw_version == MVPP22) + mvpp22_mode_reconfigure(port); + + if (port->phylink) { + phylink_start(port->phylink); + } else { + /* Phylink isn't used as of now for ACPI, so the MAC has to be + * configured manually when the interface is started. This will + * be removed as soon as the phylink ACPI support lands in. 
+ */ + struct phylink_link_state state = { + .interface = port->phy_interface, + .link = 1, + }; + mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); + } + + netif_tx_start_all_queues(port->dev); +} + +/* Set hw internals when stopping port */ +static void mvpp2_stop_dev(struct mvpp2_port *port) +{ + int i; + + /* Disable interrupts on all CPUs */ + mvpp2_interrupts_disable(port); + + for (i = 0; i < port->nqvecs; i++) + napi_disable(&port->qvecs[i].napi); + + if (port->phylink) + phylink_stop(port->phylink); + phy_power_off(port->comphy); +} + +static int mvpp2_check_ringparam_valid(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + u16 new_rx_pending = ring->rx_pending; + u16 new_tx_pending = ring->tx_pending; + + if (ring->rx_pending == 0 || ring->tx_pending == 0) + return -EINVAL; + + if (ring->rx_pending > MVPP2_MAX_RXD_MAX) + new_rx_pending = MVPP2_MAX_RXD_MAX; + else if (!IS_ALIGNED(ring->rx_pending, 16)) + new_rx_pending = ALIGN(ring->rx_pending, 16); + + if (ring->tx_pending > MVPP2_MAX_TXD_MAX) + new_tx_pending = MVPP2_MAX_TXD_MAX; + else if (!IS_ALIGNED(ring->tx_pending, 32)) + new_tx_pending = ALIGN(ring->tx_pending, 32); + + /* The Tx ring size cannot be smaller than the minimum number of + * descriptors needed for TSO. + */ + if (new_tx_pending < MVPP2_MAX_SKB_DESCS) + new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); + + if (ring->rx_pending != new_rx_pending) { + netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", + ring->rx_pending, new_rx_pending); + ring->rx_pending = new_rx_pending; + } + + if (ring->tx_pending != new_tx_pending) { + netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", + ring->tx_pending, new_tx_pending); + ring->tx_pending = new_tx_pending; + } + + return 0; +} + +static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) +{ + u32 mac_addr_l, mac_addr_m, mac_addr_h; + + mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); + mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); + mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); + addr[0] = (mac_addr_h >> 24) & 0xFF; + addr[1] = (mac_addr_h >> 16) & 0xFF; + addr[2] = (mac_addr_h >> 8) & 0xFF; + addr[3] = mac_addr_h & 0xFF; + addr[4] = mac_addr_m & 0xFF; + addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; +} + +static int mvpp2_irqs_init(struct mvpp2_port *port) +{ + int err, i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) + irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); + + err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); + if (err) + goto err; + + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) + irq_set_affinity_hint(qv->irq, + cpumask_of(qv->sw_thread_id)); + } + + return 0; +err: + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + free_irq(qv->irq, qv); + } + + return err; +} + +static void mvpp2_irqs_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); + free_irq(qv->irq, qv); + } +} + +static int mvpp2_open(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2 *priv = port->priv; + unsigned char mac_bcast[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + bool valid = false; + int err; + + err = 
mvpp2_prs_mac_da_accept(port, mac_bcast, true); + if (err) { + netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); + return err; + } + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); + if (err) { + netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); + return err; + } + err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); + if (err) { + netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); + return err; + } + err = mvpp2_prs_def_flow(port); + if (err) { + netdev_err(dev, "mvpp2_prs_def_flow failed\n"); + return err; + } + + /* Allocate the Rx/Tx queues */ + err = mvpp2_setup_rxqs(port); + if (err) { + netdev_err(port->dev, "cannot allocate Rx queues\n"); + return err; + } + + err = mvpp2_setup_txqs(port); + if (err) { + netdev_err(port->dev, "cannot allocate Tx queues\n"); + goto err_cleanup_rxqs; + } + + err = mvpp2_irqs_init(port); + if (err) { + netdev_err(port->dev, "cannot init IRQs\n"); + goto err_cleanup_txqs; + } + + /* Phylink isn't supported yet in ACPI mode */ + if (port->of_node) { + err = phylink_of_phy_connect(port->phylink, port->of_node, 0); + if (err) { + netdev_err(port->dev, "could not attach PHY (%d)\n", + err); + goto err_free_irq; + } + + valid = true; + } + + if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) { + err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, + dev->name, port); + if (err) { + netdev_err(port->dev, "cannot request link IRQ %d\n", + port->link_irq); + goto err_free_irq; + } + + mvpp22_gop_setup_irq(port); + + /* In default link is down */ + netif_carrier_off(port->dev); + + valid = true; + } else { + port->link_irq = 0; + } + + if (!valid) { + netdev_err(port->dev, + "invalid configuration: no dt or link IRQ"); + goto err_free_irq; + } + + /* Unmask interrupts on all CPUs */ + on_each_cpu(mvpp2_interrupts_unmask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, false); + + mvpp2_start_dev(port); + + if (priv->hw_version == MVPP22) + mvpp22_init_rss(port); + + /* Start hardware statistics gathering */ + queue_delayed_work(priv->stats_queue, &port->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + + return 0; + +err_free_irq: + mvpp2_irqs_deinit(port); +err_cleanup_txqs: + mvpp2_cleanup_txqs(port); +err_cleanup_rxqs: + mvpp2_cleanup_rxqs(port); + return err; +} + +static int mvpp2_stop(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu; + int cpu; + + mvpp2_stop_dev(port); + + /* Mask interrupts on all CPUs */ + on_each_cpu(mvpp2_interrupts_mask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, true); + + if (port->phylink) + phylink_disconnect_phy(port->phylink); + if (port->link_irq) + free_irq(port->link_irq, port); + + mvpp2_irqs_deinit(port); + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + tasklet_kill(&port_pcpu->tx_done_tasklet); + } + } + mvpp2_cleanup_rxqs(port); + mvpp2_cleanup_txqs(port); + + cancel_delayed_work_sync(&port->stats_work); + + return 0; +} + +static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, + struct netdev_hw_addr_list *list) +{ + struct netdev_hw_addr *ha; + int ret; + + netdev_hw_addr_list_for_each(ha, list) { + ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); + if (ret) + return ret; + } + + return 0; +} + +static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) +{ + if (!enable && (port->dev->features & 
NETIF_F_HW_VLAN_CTAG_FILTER)) + mvpp2_prs_vid_enable_filtering(port); + else + mvpp2_prs_vid_disable_filtering(port); + + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_UNI_CAST, enable); + + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, enable); +} + +static void mvpp2_set_rx_mode(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Clear the whole UC and MC list */ + mvpp2_prs_mac_del_all(port); + + if (dev->flags & IFF_PROMISC) { + mvpp2_set_rx_promisc(port, true); + return; + } + + mvpp2_set_rx_promisc(port, false); + + if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || + mvpp2_prs_mac_da_accept_list(port, &dev->uc)) + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_UNI_CAST, true); + + if (dev->flags & IFF_ALLMULTI) { + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, true); + return; + } + + if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || + mvpp2_prs_mac_da_accept_list(port, &dev->mc)) + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, true); +} + +static int mvpp2_set_mac_address(struct net_device *dev, void *p) +{ + const struct sockaddr *addr = p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = mvpp2_prs_update_mac_da(dev, addr->sa_data); + if (err) { + /* Reconfigure parser accept the original MAC address */ + mvpp2_prs_update_mac_da(dev, dev->dev_addr); + netdev_err(dev, "failed to change MAC address\n"); + } + return err; +} + +static int mvpp2_change_mtu(struct net_device *dev, int mtu) +{ + struct mvpp2_port *port = netdev_priv(dev); + int err; + + if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { + netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, + ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); + mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); + } + + if (!netif_running(dev)) { + err = mvpp2_bm_update_mtu(dev, mtu); + if (!err) { + port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); + return 0; + } + + /* Reconfigure BM to the original MTU */ + err = mvpp2_bm_update_mtu(dev, dev->mtu); + if (err) + goto log_error; + } + + mvpp2_stop_dev(port); + + err = mvpp2_bm_update_mtu(dev, mtu); + if (!err) { + port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); + goto out_start; + } + + /* Reconfigure BM to the original MTU */ + err = mvpp2_bm_update_mtu(dev, dev->mtu); + if (err) + goto log_error; + +out_start: + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + + return 0; +log_error: + netdev_err(dev, "failed to change MTU\n"); + return err; +} + +static void +mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mvpp2_port *port = netdev_priv(dev); + unsigned int start; + int cpu; + + for_each_possible_cpu(cpu) { + struct mvpp2_pcpu_stats *cpu_stats; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + + cpu_stats = per_cpu_ptr(port->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rx_packets = cpu_stats->rx_packets; + rx_bytes = cpu_stats->rx_bytes; + tx_packets = cpu_stats->tx_packets; + tx_bytes = cpu_stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + + stats->rx_errors = dev->stats.rx_errors; + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; +} + +static int mvpp2_ioctl(struct net_device *dev, struct 
ifreq *ifr, int cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_mii_ioctl(port->phylink, ifr, cmd); +} + +static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret; + + ret = mvpp2_prs_vid_entry_add(port, vid); + if (ret) + netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", + MVPP2_PRS_VLAN_FILT_MAX - 1); + return ret; +} + +static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mvpp2_port *port = netdev_priv(dev); + + mvpp2_prs_vid_entry_remove(port, vid); + return 0; +} + +static int mvpp2_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + struct mvpp2_port *port = netdev_priv(dev); + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { + mvpp2_prs_vid_enable_filtering(port); + } else { + /* Invalidate all registered VID filters for this + * port + */ + mvpp2_prs_vid_remove_all(port); + + mvpp2_prs_vid_disable_filtering(port); + } + } + + return 0; +} + +/* Ethtool methods */ + +static int mvpp2_ethtool_nway_reset(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_nway_reset(port->phylink); +} + +/* Set interrupt coalescing for ethtools */ +static int mvpp2_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvpp2_port *port = netdev_priv(dev); + int queue; + + for (queue = 0; queue < port->nrxqs; queue++) { + struct mvpp2_rx_queue *rxq = port->rxqs[queue]; + + rxq->time_coal = c->rx_coalesce_usecs; + rxq->pkts_coal = c->rx_max_coalesced_frames; + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); + } + + if (port->has_tx_irqs) { + port->tx_time_coal = c->tx_coalesce_usecs; + mvpp2_tx_time_coal_set(port); + } + + for (queue = 0; queue < port->ntxqs; queue++) { + struct mvpp2_tx_queue *txq = port->txqs[queue]; + + txq->done_pkts_coal = c->tx_max_coalesced_frames; + + if (port->has_tx_irqs) + mvpp2_tx_pkts_coal_set(port, txq); + } + + return 0; +} + +/* get coalescing for ethtools */ +static int mvpp2_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvpp2_port *port = netdev_priv(dev); + + c->rx_coalesce_usecs = port->rxqs[0]->time_coal; + c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; + c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; + c->tx_coalesce_usecs = port->tx_time_coal; + return 0; +} + +static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, dev_name(&dev->dev), + sizeof(drvinfo->bus_info)); +} + +static void mvpp2_ethtool_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvpp2_port *port = netdev_priv(dev); + + ring->rx_max_pending = MVPP2_MAX_RXD_MAX; + ring->tx_max_pending = MVPP2_MAX_TXD_MAX; + ring->rx_pending = port->rx_ring_size; + ring->tx_pending = port->tx_ring_size; +} + +static int mvpp2_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvpp2_port *port = netdev_priv(dev); + u16 prev_rx_ring_size = port->rx_ring_size; + u16 
prev_tx_ring_size = port->tx_ring_size; + int err; + + err = mvpp2_check_ringparam_valid(dev, ring); + if (err) + return err; + + if (!netif_running(dev)) { + port->rx_ring_size = ring->rx_pending; + port->tx_ring_size = ring->tx_pending; + return 0; + } + + /* The interface is running, so we have to force a + * reallocation of the queues + */ + mvpp2_stop_dev(port); + mvpp2_cleanup_rxqs(port); + mvpp2_cleanup_txqs(port); + + port->rx_ring_size = ring->rx_pending; + port->tx_ring_size = ring->tx_pending; + + err = mvpp2_setup_rxqs(port); + if (err) { + /* Reallocate Rx queues with the original ring size */ + port->rx_ring_size = prev_rx_ring_size; + ring->rx_pending = prev_rx_ring_size; + err = mvpp2_setup_rxqs(port); + if (err) + goto err_out; + } + err = mvpp2_setup_txqs(port); + if (err) { + /* Reallocate Tx queues with the original ring size */ + port->tx_ring_size = prev_tx_ring_size; + ring->tx_pending = prev_tx_ring_size; + err = mvpp2_setup_txqs(port); + if (err) + goto err_clean_rxqs; + } + + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + + return 0; + +err_clean_rxqs: + mvpp2_cleanup_rxqs(port); +err_out: + netdev_err(dev, "failed to change ring parameters"); + return err; +} + +static void mvpp2_ethtool_get_pause_param(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return; + + phylink_ethtool_get_pauseparam(port->phylink, pause); +} + +static int mvpp2_ethtool_set_pause_param(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_set_pauseparam(port->phylink, pause); +} + +static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_ksettings_get(port->phylink, cmd); +} + +static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_ksettings_set(port->phylink, cmd); +} + +/* Device ops */ + +static const struct net_device_ops mvpp2_netdev_ops = { + .ndo_open = mvpp2_open, + .ndo_stop = mvpp2_stop, + .ndo_start_xmit = mvpp2_tx, + .ndo_set_rx_mode = mvpp2_set_rx_mode, + .ndo_set_mac_address = mvpp2_set_mac_address, + .ndo_change_mtu = mvpp2_change_mtu, + .ndo_get_stats64 = mvpp2_get_stats64, + .ndo_do_ioctl = mvpp2_ioctl, + .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, + .ndo_set_features = mvpp2_set_features, +}; + +static const struct ethtool_ops mvpp2_eth_tool_ops = { + .nway_reset = mvpp2_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .set_coalesce = mvpp2_ethtool_set_coalesce, + .get_coalesce = mvpp2_ethtool_get_coalesce, + .get_drvinfo = mvpp2_ethtool_get_drvinfo, + .get_ringparam = mvpp2_ethtool_get_ringparam, + .set_ringparam = mvpp2_ethtool_set_ringparam, + .get_strings = mvpp2_ethtool_get_strings, + .get_ethtool_stats = mvpp2_ethtool_get_stats, + .get_sset_count = mvpp2_ethtool_get_sset_count, + .get_pauseparam = mvpp2_ethtool_get_pause_param, + .set_pauseparam = mvpp2_ethtool_set_pause_param, + .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, + .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, +}; + +/* Used for PPv2.1, or 
PPv2.2 with the old Device Tree binding that + * had a single IRQ defined per-port. + */ +static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v = &port->qvecs[0]; + + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + v->sw_thread_id = 0; + v->sw_thread_mask = *cpumask_bits(cpu_online_mask); + v->port = port; + v->irq = irq_of_parse_and_map(port_node, 0); + if (v->irq <= 0) + return -EINVAL; + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + + port->nqvecs = 1; + + return 0; +} + +static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v; + int i, ret; + + port->nqvecs = num_possible_cpus(); + if (queue_mode == MVPP2_QDIST_SINGLE_MODE) + port->nqvecs += 1; + + for (i = 0; i < port->nqvecs; i++) { + char irqname[16]; + + v = port->qvecs + i; + + v->port = port; + v->type = MVPP2_QUEUE_VECTOR_PRIVATE; + v->sw_thread_id = i; + v->sw_thread_mask = BIT(i); + + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + + if (queue_mode == MVPP2_QDIST_MULTI_MODE) { + v->first_rxq = i * MVPP2_DEFAULT_RXQ; + v->nrxqs = MVPP2_DEFAULT_RXQ; + } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && + i == (port->nqvecs - 1)) { + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + strncpy(irqname, "rx-shared", sizeof(irqname)); + } + + if (port_node) + v->irq = of_irq_get_byname(port_node, irqname); + else + v->irq = fwnode_irq_get(port->fwnode, i); + if (v->irq <= 0) { + ret = -EINVAL; + goto err; + } + + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + } + + return 0; + +err: + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); + return ret; +} + +static int mvpp2_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + if (port->has_tx_irqs) + return mvpp2_multi_queue_vectors_init(port, port_node); + else + return mvpp2_simple_queue_vectors_init(port, port_node); +} + +static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); +} + +/* Configure Rx queue group interrupt for this port */ +static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + int i; + + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), + port->nrxqs); + return; + } + + /* Handle the more complicated PPv2.2 case */ + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + if (!qv->nrxqs) + continue; + + val = qv->sw_thread_id; + val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = qv->first_rxq; + val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } +} + +/* Initialize port HW */ +static int mvpp2_port_init(struct mvpp2_port *port) +{ + struct device *dev = port->dev->dev.parent; + struct mvpp2 *priv = port->priv; + struct mvpp2_txq_pcpu *txq_pcpu; + int queue, cpu, err; + + /* Checks for hardware constraints */ + if (port->first_rxq + port->nrxqs > + MVPP2_MAX_PORTS * priv->max_port_rxqs) + return -EINVAL; + + if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || + (port->ntxqs > MVPP2_MAX_TXQ)) + return -EINVAL; + + /* Disable port */ + 
mvpp2_egress_disable(port); + mvpp2_port_disable(port); + + port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; + + port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), + GFP_KERNEL); + if (!port->txqs) + return -ENOMEM; + + /* Associate physical Tx queues to this port and initialize. + * The mapping is predefined. + */ + for (queue = 0; queue < port->ntxqs; queue++) { + int queue_phy_id = mvpp2_txq_phys(port->id, queue); + struct mvpp2_tx_queue *txq; + + txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); + if (!txq) { + err = -ENOMEM; + goto err_free_percpu; + } + + txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); + if (!txq->pcpu) { + err = -ENOMEM; + goto err_free_percpu; + } + + txq->id = queue_phy_id; + txq->log_id = queue; + txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu->cpu = cpu; + } + + port->txqs[queue] = txq; + } + + port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), + GFP_KERNEL); + if (!port->rxqs) { + err = -ENOMEM; + goto err_free_percpu; + } + + /* Allocate and initialize Rx queue for this port */ + for (queue = 0; queue < port->nrxqs; queue++) { + struct mvpp2_rx_queue *rxq; + + /* Map physical Rx queue to port's logical Rx queue */ + rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); + if (!rxq) { + err = -ENOMEM; + goto err_free_percpu; + } + /* Map this Rx queue to a physical queue */ + rxq->id = port->first_rxq + queue; + rxq->port = port->id; + rxq->logic_rxq = queue; + + port->rxqs[queue] = rxq; + } + + mvpp2_rx_irqs_setup(port); + + /* Create Rx descriptor rings */ + for (queue = 0; queue < port->nrxqs; queue++) { + struct mvpp2_rx_queue *rxq = port->rxqs[queue]; + + rxq->size = port->rx_ring_size; + rxq->pkts_coal = MVPP2_RX_COAL_PKTS; + rxq->time_coal = MVPP2_RX_COAL_USEC; + } + + mvpp2_ingress_disable(port); + + /* Port default configuration */ + mvpp2_defaults_set(port); + + /* Port's classifier configuration */ + mvpp2_cls_oversize_rxq_set(port); + mvpp2_cls_port_config(port); + + /* Provide an initial Rx packet size */ + port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); + + /* Initialize pools for swf */ + err = mvpp2_swf_bm_pool_init(port); + if (err) + goto err_free_percpu; + + return 0; + +err_free_percpu: + for (queue = 0; queue < port->ntxqs; queue++) { + if (!port->txqs[queue]) + continue; + free_percpu(port->txqs[queue]->pcpu); + } + return err; +} + +/* Checks if the port DT description has the TX interrupts + * described. On PPv2.1, there are no such interrupts. On PPv2.2, + * there are available, but we need to keep support for old DTs. 
+ */ +static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, + struct device_node *port_node) +{ + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", + "tx-cpu2", "tx-cpu3" }; + int ret, i; + + if (priv->hw_version == MVPP21) + return false; + + for (i = 0; i < 5; i++) { + ret = of_property_match_string(port_node, "interrupt-names", + irqs[i]); + if (ret < 0) + return false; + } + + return true; +} + +static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, + struct fwnode_handle *fwnode, + char **mac_from) +{ + struct mvpp2_port *port = netdev_priv(dev); + char hw_mac_addr[ETH_ALEN] = {0}; + char fw_mac_addr[ETH_ALEN]; + + if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { + *mac_from = "firmware node"; + ether_addr_copy(dev->dev_addr, fw_mac_addr); + return; + } + + if (priv->hw_version == MVPP21) { + mvpp21_get_mac_address(port, hw_mac_addr); + if (is_valid_ether_addr(hw_mac_addr)) { + *mac_from = "hardware"; + ether_addr_copy(dev->dev_addr, hw_mac_addr); + return; + } + } + + *mac_from = "random"; + eth_hw_addr_random(dev); +} + +static void mvpp2_phylink_validate(struct net_device *dev, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + switch (state->interface) { + case PHY_INTERFACE_MODE_10GKR: + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 10000baseKR_Full); + /* Fall-through */ + default: + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 10000baseT_Full); + /* Fall-through */ + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseX_Full); + } + + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void mvpp22_xlg_link_state(struct mvpp2_port *port, + struct phylink_link_state *state) +{ + u32 val; + + state->speed = SPEED_10000; + state->duplex = 1; + state->an_complete = 1; + + val = readl(port->base + MVPP22_XLG_STATUS); + state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); + + state->pause = 0; + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_TX; + if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_RX; +} + +static void mvpp2_gmac_link_state(struct mvpp2_port *port, + struct phylink_link_state *state) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_STATUS0); + + state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); + state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); + state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_1000BASEX: + state->speed = SPEED_1000; + break; + case PHY_INTERFACE_MODE_2500BASEX: + state->speed = SPEED_2500; + break; + default: + if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) + state->speed = SPEED_1000; + else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) + state->speed = SPEED_100; + else + state->speed = SPEED_10; + } + + state->pause = 0; + 
if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) + state->pause |= MLO_PAUSE_RX; + if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) + state->pause |= MLO_PAUSE_TX; +} + +static int mvpp2_phylink_mac_link_state(struct net_device *dev, + struct phylink_link_state *state) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { + u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); + mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + + if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) { + mvpp22_xlg_link_state(port, state); + return 1; + } + } + + mvpp2_gmac_link_state(port, state); + return 1; +} + +static void mvpp2_mac_an_restart(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + u32 val; + + if (port->phy_interface != PHY_INTERFACE_MODE_SGMII) + return; + + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + /* The RESTART_AN bit is cleared by the h/w after restarting the AN + * process. + */ + val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 ctrl0, ctrl4; + + ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); + ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); + + if (state->pause & MLO_PAUSE_TX) + ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + + ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; + ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC | + MVPP22_XLG_CTRL4_EN_IDLE_CHECK; + + writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); + writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); +} + +static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 an, ctrl0, ctrl2, ctrl4; + + an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + + /* Force link down */ + an &= ~MVPP2_GMAC_FORCE_LINK_PASS; + an |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + /* Set the GMAC in a reset state */ + ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + + an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | + MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | + MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN | + MVPP2_GMAC_FORCE_LINK_DOWN); + ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; + ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); + + if (state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX) { + /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can + * they negotiate duplex: they are always operating with a fixed + * speed of 1000/2500Mbps in full duplex, so force 1000/2500 + * speed and full duplex here. 
+ */ + ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; + an |= MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; + } else if (!phy_interface_mode_is_rgmii(state->interface)) { + an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG; + } + + if (state->duplex) + an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; + if (phylink_test(state->advertising, Pause)) + an |= MVPP2_GMAC_FC_ADV_EN; + if (phylink_test(state->advertising, Asym_Pause)) + an |= MVPP2_GMAC_FC_ADV_ASM_EN; + + if (state->interface == PHY_INTERFACE_MODE_SGMII || + state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX) { + an |= MVPP2_GMAC_IN_BAND_AUTONEG; + ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; + + ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + + if (state->pause & MLO_PAUSE_TX) + ctrl4 |= MVPP22_CTRL4_TX_FC_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl4 |= MVPP22_CTRL4_RX_FC_EN; + } else if (phy_interface_mode_is_rgmii(state->interface)) { + an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS; + + if (state->speed == SPEED_1000) + an |= MVPP2_GMAC_CONFIG_GMII_SPEED; + else if (state->speed == SPEED_100) + an |= MVPP2_GMAC_CONFIG_MII_SPEED; + + ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; + ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } + + writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Check for invalid configuration */ + if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) { + netdev_err(dev, "Invalid mode on %s\n", dev->name); + return; + } + + netif_tx_stop_all_queues(port->dev); + if (!port->has_phy) + netif_carrier_off(port->dev); + + /* Make sure the port is disabled when reconfiguring the mode */ + mvpp2_port_disable(port); + + if (port->priv->hw_version == MVPP22 && + port->phy_interface != state->interface) { + port->phy_interface = state->interface; + + /* Reconfigure the serdes lanes */ + phy_power_off(port->comphy); + mvpp22_mode_reconfigure(port); + } + + /* mac (re)configuration */ + if (state->interface == PHY_INTERFACE_MODE_10GKR) + mvpp2_xlg_config(port, mode, state); + else if (phy_interface_mode_is_rgmii(state->interface) || + state->interface == PHY_INTERFACE_MODE_SGMII || + state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX) + mvpp2_gmac_config(port, mode, state); + + if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) + mvpp2_port_loopback_set(port, state); + + /* If the port already was up, make sure it's still in the same state */ + if (state->link || !port->has_phy) { + mvpp2_port_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + if (!port->has_phy) + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } +} + +static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy) +{ + struct mvpp2_port *port = netdev_priv(dev); + u32 val; + + if (!phylink_autoneg_inband(mode) && + interface != PHY_INTERFACE_MODE_10GKR) { + 
val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; + if (phy_interface_mode_is_rgmii(interface)) + val |= MVPP2_GMAC_FORCE_LINK_PASS; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + + mvpp2_port_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + netif_tx_wake_all_queues(dev); +} + +static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode, + phy_interface_t interface) +{ + struct mvpp2_port *port = netdev_priv(dev); + u32 val; + + if (!phylink_autoneg_inband(mode) && + interface != PHY_INTERFACE_MODE_10GKR) { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + + netif_tx_stop_all_queues(dev); + mvpp2_egress_disable(port); + mvpp2_ingress_disable(port); + + /* When using link interrupts to notify phylink of a MAC state change, + * we do not want the port to be disabled (we want to receive further + * interrupts, to be notified when the port will have a link later). + */ + if (!port->has_phy) + return; + + mvpp2_port_disable(port); +} + +static const struct phylink_mac_ops mvpp2_phylink_ops = { + .validate = mvpp2_phylink_validate, + .mac_link_state = mvpp2_phylink_mac_link_state, + .mac_an_restart = mvpp2_mac_an_restart, + .mac_config = mvpp2_mac_config, + .mac_link_up = mvpp2_mac_link_up, + .mac_link_down = mvpp2_mac_link_down, +}; + +/* Ports initialization */ +static int mvpp2_port_probe(struct platform_device *pdev, + struct fwnode_handle *port_fwnode, + struct mvpp2 *priv) +{ + struct phy *comphy = NULL; + struct mvpp2_port *port; + struct mvpp2_port_pcpu *port_pcpu; + struct device_node *port_node = to_of_node(port_fwnode); + struct net_device *dev; + struct resource *res; + struct phylink *phylink; + char *mac_from = ""; + unsigned int ntxqs, nrxqs; + bool has_tx_irqs; + u32 id; + int features; + int phy_mode; + int err, i, cpu; + + if (port_node) { + has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); + } else { + has_tx_irqs = true; + queue_mode = MVPP2_QDIST_MULTI_MODE; + } + + if (!has_tx_irqs) + queue_mode = MVPP2_QDIST_SINGLE_MODE; + + ntxqs = MVPP2_MAX_TXQ; + if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) + nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); + else + nrxqs = MVPP2_DEFAULT_RXQ; + + dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); + if (!dev) + return -ENOMEM; + + phy_mode = fwnode_get_phy_mode(port_fwnode); + if (phy_mode < 0) { + dev_err(&pdev->dev, "incorrect phy mode\n"); + err = phy_mode; + goto err_free_netdev; + } + + if (port_node) { + comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); + if (IS_ERR(comphy)) { + if (PTR_ERR(comphy) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_free_netdev; + } + comphy = NULL; + } + } + + if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { + err = -EINVAL; + dev_err(&pdev->dev, "missing port-id value\n"); + goto err_free_netdev; + } + + dev->tx_queue_len = MVPP2_MAX_TXD_MAX; + dev->watchdog_timeo = 5 * HZ; + dev->netdev_ops = &mvpp2_netdev_ops; + dev->ethtool_ops = &mvpp2_eth_tool_ops; + + port = netdev_priv(dev); + port->dev = dev; + port->fwnode = port_fwnode; + port->has_phy = !!of_find_property(port_node, "phy", NULL); + port->ntxqs = ntxqs; + port->nrxqs = nrxqs; + port->priv = priv; + port->has_tx_irqs = has_tx_irqs; + + err = mvpp2_queue_vectors_init(port, port_node); + if (err) + goto err_free_netdev; + + if (port_node) + 
port->link_irq = of_irq_get_byname(port_node, "link"); + else + port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); + if (port->link_irq == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_deinit_qvecs; + } + if (port->link_irq <= 0) + /* the link irq is optional */ + port->link_irq = 0; + + if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) + port->flags |= MVPP2_F_LOOPBACK; + + port->id = id; + if (priv->hw_version == MVPP21) + port->first_rxq = port->id * port->nrxqs; + else + port->first_rxq = port->id * priv->max_port_rxqs; + + port->of_node = port_node; + port->phy_interface = phy_mode; + port->comphy = comphy; + + if (priv->hw_version == MVPP21) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id); + port->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(port->base)) { + err = PTR_ERR(port->base); + goto err_free_irq; + } + + port->stats_base = port->priv->lms_base + + MVPP21_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; + } else { + if (fwnode_property_read_u32(port_fwnode, "gop-port-id", + &port->gop_id)) { + err = -EINVAL; + dev_err(&pdev->dev, "missing gop-port-id value\n"); + goto err_deinit_qvecs; + } + + port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); + port->stats_base = port->priv->iface_base + + MVPP22_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; + } + + /* Alloc per-cpu and ethtool stats */ + port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); + if (!port->stats) { + err = -ENOMEM; + goto err_free_irq; + } + + port->ethtool_stats = devm_kcalloc(&pdev->dev, + ARRAY_SIZE(mvpp2_ethtool_regs), + sizeof(u64), GFP_KERNEL); + if (!port->ethtool_stats) { + err = -ENOMEM; + goto err_free_stats; + } + + mutex_init(&port->gather_stats_lock); + INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); + + mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); + + port->tx_ring_size = MVPP2_MAX_TXD_DFLT; + port->rx_ring_size = MVPP2_MAX_RXD_DFLT; + SET_NETDEV_DEV(dev, &pdev->dev); + + err = mvpp2_port_init(port); + if (err < 0) { + dev_err(&pdev->dev, "failed to init port %d\n", id); + goto err_free_stats; + } + + mvpp2_port_periodic_xon_disable(port); + + mvpp2_port_reset(port); + + port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); + if (!port->pcpu) { + err = -ENOMEM; + goto err_free_txq_pcpu; + } + + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; + + tasklet_init(&port_pcpu->tx_done_tasklet, + mvpp2_tx_proc_cb, + (unsigned long)dev); + } + } + + features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO; + dev->features = features | NETIF_F_RXCSUM; + dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | + NETIF_F_HW_VLAN_CTAG_FILTER; + + if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + } + + dev->vlan_features |= features; + dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; + dev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 68 - 9704 */ + dev->min_mtu = ETH_MIN_MTU; + /* 9704 == 9728 - 20 and rounding to 8 */ + dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; + + /* Phylink isn't used w/ ACPI as of now */ + if (port_node) { + phylink = phylink_create(dev, port_fwnode, 
phy_mode, + &mvpp2_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto err_free_port_pcpu; + } + port->phylink = phylink; + } else { + port->phylink = NULL; + } + + err = register_netdev(dev); + if (err < 0) { + dev_err(&pdev->dev, "failed to register netdev\n"); + goto err_phylink; + } + netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); + + priv->port_list[priv->port_count++] = port; + + return 0; + +err_phylink: + if (port->phylink) + phylink_destroy(port->phylink); +err_free_port_pcpu: + free_percpu(port->pcpu); +err_free_txq_pcpu: + for (i = 0; i < port->ntxqs; i++) + free_percpu(port->txqs[i]->pcpu); +err_free_stats: + free_percpu(port->stats); +err_free_irq: + if (port->link_irq) + irq_dispose_mapping(port->link_irq); +err_deinit_qvecs: + mvpp2_queue_vectors_deinit(port); +err_free_netdev: + free_netdev(dev); + return err; +} + +/* Ports removal routine */ +static void mvpp2_port_remove(struct mvpp2_port *port) +{ + int i; + + unregister_netdev(port->dev); + if (port->phylink) + phylink_destroy(port->phylink); + free_percpu(port->pcpu); + free_percpu(port->stats); + for (i = 0; i < port->ntxqs; i++) + free_percpu(port->txqs[i]->pcpu); + mvpp2_queue_vectors_deinit(port); + if (port->link_irq) + irq_dispose_mapping(port->link_irq); + free_netdev(port->dev); +} + +/* Initialize decoding windows */ +static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, + struct mvpp2 *priv) +{ + u32 win_enable; + int i; + + for (i = 0; i < 6; i++) { + mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); + mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); + + if (i < 4) + mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); + } + + win_enable = 0; + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + mvpp2_write(priv, MVPP2_WIN_BASE(i), + (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | + dram->mbus_dram_target_id); + + mvpp2_write(priv, MVPP2_WIN_SIZE(i), + (cs->size - 1) & 0xffff0000); + + win_enable |= (1 << i); + } + + mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); +} + +/* Initialize Rx FIFO's */ +static void mvpp2_rx_fifo_init(struct mvpp2 *priv) +{ + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); + } + + mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, + MVPP2_RX_FIFO_PORT_MIN_PKT); + mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); +} + +static void mvpp22_rx_fifo_init(struct mvpp2 *priv) +{ + int port; + + /* The FIFO size parameters are set depending on the maximum speed a + * given port can handle: + * - Port 0: 10Gbps + * - Port 1: 2.5Gbps + * - Ports 2 and 3: 1Gbps + */ + + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0), + MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB); + + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1), + MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB); + + for (port = 2; port < MVPP2_MAX_PORTS; port++) { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); + } + + mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, + MVPP2_RX_FIFO_PORT_MIN_PKT); + mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); +} + +/* Initialize Tx 
FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G + * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G, + * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB. + */ +static void mvpp22_tx_fifo_init(struct mvpp2 *priv) +{ + int port, size, thrs; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + if (port == 0) { + size = MVPP22_TX_FIFO_DATA_SIZE_10KB; + thrs = MVPP2_TX_FIFO_THRESHOLD_10KB; + } else { + size = MVPP22_TX_FIFO_DATA_SIZE_3KB; + thrs = MVPP2_TX_FIFO_THRESHOLD_3KB; + } + mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); + mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs); + } +} + +static void mvpp2_axi_init(struct mvpp2 *priv) +{ + u32 val, rdval, wrval; + + mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); + + /* AXI Bridge Configuration */ + + rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE + << MVPP22_AXI_ATTR_CACHE_OFFS; + rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_ATTR_DOMAIN_OFFS; + + wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE + << MVPP22_AXI_ATTR_CACHE_OFFS; + wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_ATTR_DOMAIN_OFFS; + + /* BM */ + mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); + mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); + + /* Descriptors */ + mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); + mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); + + /* Buffer Data */ + mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); + + val = MVPP22_AXI_CODE_CACHE_NON_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); + mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); + + val = MVPP22_AXI_CODE_CACHE_RD_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + + mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); + + val = MVPP22_AXI_CODE_CACHE_WR_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + + mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); +} + +/* Initialize network controller common part HW */ +static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + const struct mbus_dram_target_info *dram_target_info; + int err, i; + u32 val; + + /* MBUS windows configuration */ + dram_target_info = mv_mbus_dram_info(); + if (dram_target_info) + mvpp2_conf_mbus_windows(dram_target_info, priv); + + if (priv->hw_version == MVPP22) + mvpp2_axi_init(priv); + + /* Disable HW PHY polling */ + if (priv->hw_version == MVPP21) { + val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + val |= MVPP2_PHY_AN_STOP_SMI0_MASK; + writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + } else { + val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); + val &= ~MVPP22_SMI_POLLING_EN; + writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); + } + + /* Allocate and initialize aggregated TXQs */ + priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), + sizeof(*priv->aggr_txqs), + GFP_KERNEL); + if (!priv->aggr_txqs) + return -ENOMEM; + + for_each_present_cpu(i) { + priv->aggr_txqs[i].id = i; + priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; + err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); + if (err < 
0) + return err; + } + + /* Fifo Init */ + if (priv->hw_version == MVPP21) { + mvpp2_rx_fifo_init(priv); + } else { + mvpp22_rx_fifo_init(priv); + mvpp22_tx_fifo_init(priv); + } + + if (priv->hw_version == MVPP21) + writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, + priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); + + /* Allow cache snoop when transmiting packets */ + mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); + + /* Buffer Manager initialization */ + err = mvpp2_bm_init(pdev, priv); + if (err < 0) + return err; + + /* Parser default initialization */ + err = mvpp2_prs_default_init(pdev, priv); + if (err < 0) + return err; + + /* Classifier default initialization */ + mvpp2_cls_init(priv); + + return 0; +} + +static int mvpp2_probe(struct platform_device *pdev) +{ + const struct acpi_device_id *acpi_id; + struct fwnode_handle *fwnode = pdev->dev.fwnode; + struct fwnode_handle *port_fwnode; + struct mvpp2 *priv; + struct resource *res; + void __iomem *base; + int i; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (has_acpi_companion(&pdev->dev)) { + acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, + &pdev->dev); + priv->hw_version = (unsigned long)acpi_id->driver_data; + } else { + priv->hw_version = + (unsigned long)of_device_get_match_data(&pdev->dev); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + if (priv->hw_version == MVPP21) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->lms_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->lms_base)) + return PTR_ERR(priv->lms_base); + } else { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (has_acpi_companion(&pdev->dev)) { + /* In case the MDIO memory region is declared in + * the ACPI, it can already appear as 'in-use' + * in the OS. Because it is overlapped by second + * region of the network controller, make + * sure it is released, before requesting it again. + * The care is taken by mvpp2 driver to avoid + * concurrent access to this memory region. + */ + release_resource(res); + } + priv->iface_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->iface_base)) + return PTR_ERR(priv->iface_base); + } + + if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) { + priv->sysctrl_base = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "marvell,system-controller"); + if (IS_ERR(priv->sysctrl_base)) + /* The system controller regmap is optional for dt + * compatibility reasons. When not provided, the + * configuration of the GoP relies on the + * firmware/bootloader. + */ + priv->sysctrl_base = NULL; + } + + mvpp2_setup_bm_pool(); + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + u32 addr_space_sz; + + addr_space_sz = (priv->hw_version == MVPP21 ? 
+ MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); + priv->swth_base[i] = base + i * addr_space_sz; + } + + if (priv->hw_version == MVPP21) + priv->max_port_rxqs = 8; + else + priv->max_port_rxqs = 32; + + if (dev_of_node(&pdev->dev)) { + priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); + if (IS_ERR(priv->pp_clk)) + return PTR_ERR(priv->pp_clk); + err = clk_prepare_enable(priv->pp_clk); + if (err < 0) + return err; + + priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); + if (IS_ERR(priv->gop_clk)) { + err = PTR_ERR(priv->gop_clk); + goto err_pp_clk; + } + err = clk_prepare_enable(priv->gop_clk); + if (err < 0) + goto err_pp_clk; + + if (priv->hw_version == MVPP22) { + priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); + if (IS_ERR(priv->mg_clk)) { + err = PTR_ERR(priv->mg_clk); + goto err_gop_clk; + } + + err = clk_prepare_enable(priv->mg_clk); + if (err < 0) + goto err_gop_clk; + + priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); + if (IS_ERR(priv->mg_core_clk)) { + priv->mg_core_clk = NULL; + } else { + err = clk_prepare_enable(priv->mg_core_clk); + if (err < 0) + goto err_mg_clk; + } + } + + priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); + if (IS_ERR(priv->axi_clk)) { + err = PTR_ERR(priv->axi_clk); + if (err == -EPROBE_DEFER) + goto err_mg_core_clk; + priv->axi_clk = NULL; + } else { + err = clk_prepare_enable(priv->axi_clk); + if (err < 0) + goto err_mg_core_clk; + } + + /* Get system's tclk rate */ + priv->tclk = clk_get_rate(priv->pp_clk); + } else if (device_property_read_u32(&pdev->dev, "clock-frequency", + &priv->tclk)) { + dev_err(&pdev->dev, "missing clock-frequency value\n"); + return -EINVAL; + } + + if (priv->hw_version == MVPP22) { + err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); + if (err) + goto err_axi_clk; + /* Sadly, the BM pools all share the same register to + * store the high 32 bits of their address. So they + * must all have the same high 32 bits, which forces + * us to restrict coherent memory to DMA_BIT_MASK(32). + */ + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_axi_clk; + } + + /* Initialize network controller */ + err = mvpp2_init(pdev, priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to initialize controller\n"); + goto err_axi_clk; + } + + /* Initialize ports */ + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + err = mvpp2_port_probe(pdev, port_fwnode, priv); + if (err < 0) + goto err_port_probe; + } + + if (priv->port_count == 0) { + dev_err(&pdev->dev, "no ports enabled\n"); + err = -ENODEV; + goto err_axi_clk; + } + + /* Statistics must be gathered regularly because some of them (like + * packets counters) are 32-bit registers and could overflow quite + * quickly. For instance, a 10Gb link used at full bandwidth with the + * smallest packets (64B) will overflow a 32-bit counter in less than + * 30 seconds. Then, use a workqueue to fill 64-bit counters. + */ + snprintf(priv->queue_name, sizeof(priv->queue_name), + "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), + priv->port_count > 1 ? 
"+" : ""); + priv->stats_queue = create_singlethread_workqueue(priv->queue_name); + if (!priv->stats_queue) { + err = -ENOMEM; + goto err_port_probe; + } + + platform_set_drvdata(pdev, priv); + return 0; + +err_port_probe: + i = 0; + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + if (priv->port_list[i]) + mvpp2_port_remove(priv->port_list[i]); + i++; + } +err_axi_clk: + clk_disable_unprepare(priv->axi_clk); + +err_mg_core_clk: + if (priv->hw_version == MVPP22) + clk_disable_unprepare(priv->mg_core_clk); +err_mg_clk: + if (priv->hw_version == MVPP22) + clk_disable_unprepare(priv->mg_clk); +err_gop_clk: + clk_disable_unprepare(priv->gop_clk); +err_pp_clk: + clk_disable_unprepare(priv->pp_clk); + return err; +} + +static int mvpp2_remove(struct platform_device *pdev) +{ + struct mvpp2 *priv = platform_get_drvdata(pdev); + struct fwnode_handle *fwnode = pdev->dev.fwnode; + struct fwnode_handle *port_fwnode; + int i = 0; + + flush_workqueue(priv->stats_queue); + destroy_workqueue(priv->stats_queue); + + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + if (priv->port_list[i]) { + mutex_destroy(&priv->port_list[i]->gather_stats_lock); + mvpp2_port_remove(priv->port_list[i]); + } + i++; + } + + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; + + mvpp2_bm_pool_destroy(pdev, priv, bm_pool); + } + + for_each_present_cpu(i) { + struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; + + dma_free_coherent(&pdev->dev, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + aggr_txq->descs, + aggr_txq->descs_dma); + } + + if (is_acpi_node(port_fwnode)) + return 0; + + clk_disable_unprepare(priv->axi_clk); + clk_disable_unprepare(priv->mg_core_clk); + clk_disable_unprepare(priv->mg_clk); + clk_disable_unprepare(priv->pp_clk); + clk_disable_unprepare(priv->gop_clk); + + return 0; +} + +static const struct of_device_id mvpp2_match[] = { + { + .compatible = "marvell,armada-375-pp2", + .data = (void *)MVPP21, + }, + { + .compatible = "marvell,armada-7k-pp22", + .data = (void *)MVPP22, + }, + { } +}; +MODULE_DEVICE_TABLE(of, mvpp2_match); + +static const struct acpi_device_id mvpp2_acpi_match[] = { + { "MRVL0110", MVPP22 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); + +static struct platform_driver mvpp2_driver = { + .probe = mvpp2_probe, + .remove = mvpp2_remove, + .driver = { + .name = MVPP2_DRIVER_NAME, + .of_match_table = mvpp2_match, + .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), + }, +}; + +module_platform_driver(mvpp2_driver); + +MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); +MODULE_AUTHOR("Marcin Wojtas "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c new file mode 100644 index 000000000000..6bb69f086794 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -0,0 +1,2467 @@ +/* + * Header Parser helpers for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mvpp2.h" +#include "mvpp2_prs.h" + +/* Update parser tcam and sram hw entries */ +static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) +{ + int i; + + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + /* Clear entry invalidation bit */ + pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); + + return 0; +} + +/* Initialize tcam entry from hw */ +static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) +{ + int i; + + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + memset(pe, 0, sizeof(*pe)); + pe->index = tid; + + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + + pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, + MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); + if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) + return MVPP2_PRS_TCAM_ENTRY_INVALID; + + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); + + return 0; +} + +/* Invalidate tcam hw entry */ +static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) +{ + /* Write index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), + MVPP2_PRS_TCAM_INV_MASK); +} + +/* Enable shadow table entry and set its lookup ID */ +static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) +{ + priv->prs_shadow[index].valid = true; + priv->prs_shadow[index].lu = lu; +} + +/* Update ri fields in shadow table entry */ +static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, + unsigned int ri, unsigned int ri_mask) +{ + priv->prs_shadow[index].ri_mask = ri_mask; + priv->prs_shadow[index].ri = ri; +} + +/* Update lookup field in tcam sw entry */ +static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); + + pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; + pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; +} + +/* Update mask for single port in tcam sw entry */ +static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, + unsigned int port, bool add) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + if (add) + pe->tcam.byte[enable_off] &= ~(1 << port); + else + pe->tcam.byte[enable_off] |= 1 << port; +} + +/* Update port map in tcam sw entry */ +static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, + unsigned int ports) +{ + unsigned char port_mask = MVPP2_PRS_PORT_MASK; + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; + pe->tcam.byte[enable_off] &= ~port_mask; + 
pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; +} + +/* Obtain port map from tcam sw entry */ +static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; +} + +/* Set byte of data and its enable bits in tcam sw entry */ +static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char byte, + unsigned char enable) +{ + pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; + pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; +} + +/* Get byte of data and its enable bits from tcam sw entry */ +static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable) +{ + *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; + *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; +} + +/* Compare tcam data bytes with a pattern */ +static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, + u16 data) +{ + int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); + u16 tcam_data; + + tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; + if (tcam_data != data) + return false; + return true; +} + +/* Update ai bits in tcam sw entry */ +static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int enable) +{ + int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; + + for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { + if (!(enable & BIT(i))) + continue; + + if (bits & BIT(i)) + pe->tcam.byte[ai_idx] |= 1 << i; + else + pe->tcam.byte[ai_idx] &= ~(1 << i); + } + + pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; +} + +/* Get ai bits from tcam sw entry */ +static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) +{ + return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; +} + +/* Set ethertype in tcam sw entry */ +static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, + unsigned short ethertype) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); +} + +/* Set vid in tcam sw entry */ +static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, + unsigned short vid) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); +} + +/* Set bits in sram sw entry */ +static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, + int val) +{ + pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); +} + +/* Clear bits in sram sw entry */ +static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, + int val) +{ + pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); +} + +/* Update ri bits in sram sw entry */ +static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int mask) +{ + unsigned int i; + + for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { + int ri_off = MVPP2_PRS_SRAM_RI_OFFS; + + if (!(mask & BIT(i))) + continue; + + if (bits & BIT(i)) + mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); + else + mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); + + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); + } +} + +/* Obtain ri bits from sram sw entry */ +static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) +{ + return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; +} + +/* Update ai bits 
in sram sw entry */ +static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int mask) +{ + unsigned int i; + int ai_off = MVPP2_PRS_SRAM_AI_OFFS; + + for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { + if (!(mask & BIT(i))) + continue; + + if (bits & BIT(i)) + mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); + else + mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); + + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); + } +} + +/* Read ai bits from sram sw entry */ +static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) +{ + u8 bits; + int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); + int ai_en_off = ai_off + 1; + int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; + + bits = (pe->sram.byte[ai_off] >> ai_shift) | + (pe->sram.byte[ai_en_off] << (8 - ai_shift)); + + return bits; +} + +/* In sram sw entry set lookup ID field of the tcam key to be used in the next + * lookup interation + */ +static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, + unsigned int lu) +{ + int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; + + mvpp2_prs_sram_bits_clear(pe, sram_next_off, + MVPP2_PRS_SRAM_NEXT_LU_MASK); + mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); +} + +/* In the sram sw entry set sign and value of the next lookup offset + * and the offset value generated to the classifier + */ +static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, + unsigned int op) +{ + /* Set sign */ + if (shift < 0) { + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); + shift = 0 - shift; + } else { + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); + } + + /* Set value */ + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = + (unsigned char)shift; + + /* Reset and set operation */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); + + /* Set base offset as current */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); +} + +/* In the sram sw entry set sign and value of the user defined offset + * generated to the classifier + */ +static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, + unsigned int type, int offset, + unsigned int op) +{ + /* Set sign */ + if (offset < 0) { + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); + offset = 0 - offset; + } else { + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); + } + + /* Set value */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, + MVPP2_PRS_SRAM_UDF_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + + MVPP2_PRS_SRAM_UDF_BITS)] &= + ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + + MVPP2_PRS_SRAM_UDF_BITS)] |= + (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + + /* Set offset type */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, + MVPP2_PRS_SRAM_UDF_TYPE_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); + + /* Set offset operation */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, + MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); + + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= + ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> + (8 - 
(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= + (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + + /* Set base offset as current */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); +} + +/* Find parser flow entry */ +static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ + for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { + u8 bits; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + bits = mvpp2_prs_sram_ai_get(&pe); + + /* Sram store classification lookup ID in AI bits [5:0] */ + if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) + return tid; + } + + return -ENOENT; +} + +/* Return first free tcam index, seeking from start to end */ +static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, + unsigned char end) +{ + int tid; + + if (start > end) + swap(start, end); + + if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) + end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; + + for (tid = start; tid <= end; tid++) { + if (!priv->prs_shadow[tid].valid) + return tid; + } + + return -EINVAL; +} + +/* Enable/disable dropping all mac da's */ +static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) +{ + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { + /* Entry exist - update port only */ + mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + pe.index = MVPP2_PE_DROP_ALL; + + /* Non-promiscuous mode for all ports - DROP unknown packets */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set port to unicast or multicast promiscuous mode */ +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + struct mvpp2_prs_entry pe; + unsigned char cast_match; + unsigned int ri; + int tid; + + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { + cast_match = MVPP2_PRS_UCAST_VAL; + tid = MVPP2_PE_MAC_UC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_UCAST; + } else { + cast_match = MVPP2_PRS_MCAST_VAL; + tid = MVPP2_PE_MAC_MC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_MCAST; + } + + /* promiscuous mode - Accept unknown unicast or multicast packets */ + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } else { + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + pe.index = tid; + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); + + /* Set result info bits */ + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK); + + /* Match UC or MC addresses */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match, + MVPP2_PRS_CAST_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Mask all ports */ + 
mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set entry for dsa packets */ +static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, + bool tagged, bool extend) +{ + struct mvpp2_prs_entry pe; + int tid, shift; + + if (extend) { + tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; + shift = 8; + } else { + tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; + shift = 4; + } + + if (priv->prs_shadow[tid].valid) { + /* Entry exist - update port only */ + mvpp2_prs_init_from_hw(priv, &pe, tid); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = tid; + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); + + if (tagged) { + /* Set tagged bit in DSA tag */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + + /* Set ai bits for next iteration */ + if (extend) + mvpp2_prs_sram_ai_update(&pe, 1, + MVPP2_PRS_SRAM_AI_MASK); + else + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + + /* Set result info bits to 'single vlan' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, + MVPP2_PRS_RI_VLAN_MASK); + /* If packet is tagged continue check vid filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + } else { + /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/ + mvpp2_prs_sram_shift_set(&pe, shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Set result info bits to 'no vlans' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + } + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set entry for dsa ethertype */ +static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, + bool add, bool tagged, bool extend) +{ + struct mvpp2_prs_entry pe; + int tid, shift, port_mask; + + if (extend) { + tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : + MVPP2_PE_ETYPE_EDSA_UNTAGGED; + port_mask = 0; + shift = 8; + } else { + tid = tagged ? 
MVPP2_PE_ETYPE_DSA_TAGGED : + MVPP2_PE_ETYPE_DSA_UNTAGGED; + port_mask = MVPP2_PRS_PORT_MASK; + shift = 4; + } + + if (priv->prs_shadow[tid].valid) { + /* Entry exist - update port only */ + mvpp2_prs_init_from_hw(priv, &pe, tid); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = tid; + + /* Set ethertype */ + mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); + mvpp2_prs_match_etype(&pe, 2, 0); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, + MVPP2_PRS_RI_DSA_MASK); + /* Shift ethertype + 2 byte reserved + tag*/ + mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); + + if (tagged) { + /* Set tagged bit in DSA tag */ + mvpp2_prs_tcam_data_byte_set(&pe, + MVPP2_ETH_TYPE_LEN + 2 + 3, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + /* If packet is tagged continue check vlans */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + } else { + /* Set result info bits to 'no vlans' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + } + /* Mask/unmask all ports, depending on dsa type */ + mvpp2_prs_tcam_port_map_set(&pe, port_mask); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Search for existing single/triple vlan entry */ +static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VLAN */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned int ri_bits, ai_bits; + bool match; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); + if (!match) + continue; + + /* Get vlan type */ + ri_bits = mvpp2_prs_sram_ri_get(&pe); + ri_bits &= MVPP2_PRS_RI_VLAN_MASK; + + /* Get current ai value from tcam */ + ai_bits = mvpp2_prs_tcam_ai_get(&pe); + /* Clear double vlan bit */ + ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; + + if (ai != ai_bits) + continue; + + if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || + ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) + return tid; + } + + return -ENOENT; +} + +/* Add/update single/triple vlan entry */ +static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, + unsigned int port_map) +{ + struct mvpp2_prs_entry pe; + int tid_aux, tid; + int ret = 0; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_vlan_find(priv, tpid, ai); + + if (tid < 0) { + /* Create new tcam entry */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + /* Get last double vlan tid */ + for (tid_aux = MVPP2_PE_LAST_FREE_TID; + tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { + unsigned int ri_bits; + + if (!priv->prs_shadow[tid_aux].valid || + priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); + if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == + MVPP2_PRS_RI_VLAN_DOUBLE) + break; + } + + if (tid <= tid_aux) + return -EINVAL; + + 
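A quick aside on the byte order used by the lookups above: mvpp2_prs_match_etype() stores a 16-bit key most-significant byte first, while mvpp2_prs_tcam_data_cmp() reassembles the two bytes least-significant byte first, which is why mvpp2_prs_vlan_find() and mvpp2_prs_double_vlan_find() compare against swab16(tpid). A minimal user-space sketch of that relationship follows; the array and helper names are simplified stand-ins, not the driver's structures.

/* Illustrative sketch, not part of the patch: why the VLAN lookups
 * compare against swab16(tpid). Simplified stand-ins for the tcam key
 * bytes and the two helpers shown above; only the byte order matters.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t tcam[8];

/* mirrors mvpp2_prs_match_etype(): high byte first in the key */
static void match_etype(int offset, uint16_t ethertype)
{
	tcam[offset + 0] = ethertype >> 8;
	tcam[offset + 1] = ethertype & 0xff;
}

/* mirrors mvpp2_prs_tcam_data_cmp(): reads the pair back low byte first */
static int data_cmp(int offset, uint16_t data)
{
	uint16_t v = (tcam[offset + 1] << 8) | tcam[offset + 0];

	return v == data;
}

static uint16_t swab16(uint16_t x)
{
	return (x >> 8) | (x << 8);
}

int main(void)
{
	match_etype(0, 0x8100);			/* store the 802.1Q TPID */
	assert(!data_cmp(0, 0x8100));		/* raw value does not match */
	assert(data_cmp(0, swab16(0x8100)));	/* the swapped value does */
	printf("key bytes: %02x %02x\n", tcam[0], tcam[1]);
	return 0;
}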
memset(&pe, 0, sizeof(pe)); + pe.index = tid; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + + mvpp2_prs_match_etype(&pe, 0, tpid); + + /* VLAN tag detected, proceed with VID filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, + MVPP2_PRS_RI_VLAN_MASK); + } else { + ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE, + MVPP2_PRS_RI_VLAN_MASK); + } + mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + /* Update ports' mask */ + mvpp2_prs_tcam_port_map_set(&pe, port_map); + + mvpp2_prs_hw_write(priv, &pe); + + return ret; +} + +/* Get first free double vlan ai number */ +static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) +{ + int i; + + for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { + if (!priv->prs_double_vlans[i]) + return i; + } + + return -EINVAL; +} + +/* Search for existing double vlan entry */ +static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, + unsigned short tpid2) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VLAN */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned int ri_mask; + bool match; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && + mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); + + if (!match) + continue; + + ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK; + if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) + return tid; + } + + return -ENOENT; +} + +/* Add or update double vlan entry */ +static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, + unsigned short tpid2, + unsigned int port_map) +{ + int tid_aux, tid, ai, ret = 0; + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); + + if (tid < 0) { + /* Create new tcam entry */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + /* Set ai value for new double vlan entry */ + ai = mvpp2_prs_double_vlan_ai_free_get(priv); + if (ai < 0) + return ai; + + /* Get first single/triple vlan tid */ + for (tid_aux = MVPP2_PE_FIRST_FREE_TID; + tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { + unsigned int ri_bits; + + if (!priv->prs_shadow[tid_aux].valid || + priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); + ri_bits &= MVPP2_PRS_RI_VLAN_MASK; + if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || + ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) + break; + } + + if (tid >= tid_aux) + return -ERANGE; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = tid; + + priv->prs_double_vlans[ai] = true; + + mvpp2_prs_match_etype(&pe, 0, tpid1); + mvpp2_prs_match_etype(&pe, 4, tpid2); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + /* Shift 4 bytes - skip outer vlan tag */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + 
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Update ports' mask */ + mvpp2_prs_tcam_port_map_set(&pe, port_map); + mvpp2_prs_hw_write(priv, &pe); + + return ret; +} + +/* IPv4 header parsing for fragmentation and L4 offset */ +static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_IGMP)) + return -EINVAL; + + /* Not fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK_L); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + + mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, + ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv4 L3 multicast or broadcast */ +static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int mask, tid; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + switch (l3_cast) { + case MVPP2_PRS_L3_MULTI_CAST: + mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, + MVPP2_PRS_IPV4_MC_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + case MVPP2_PRS_L3_BROAD_CAST: + mask = MVPP2_PRS_IPV4_BC_MASK; + mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, 
mask); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + default: + return -EINVAL; + } + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for protocols over IPv6 */ +static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 6, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Write HW */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv6 L3 multicast entry */ +static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int tid; + + if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPv6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, + MVPP2_PRS_IPV6_MC_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Parser per-port initialization */ +static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, + int lu_max, int offset) +{ + u32 val; + + /* Set lookup ID */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); + val &= ~MVPP2_PRS_PORT_LU_MASK(port); + val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); + mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); + + /* Set maximum number of loops for packet received from port */ + val = 
mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); + val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); + val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); + mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); + + /* Set initial offset for packet header extraction for the first + * searching loop + */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); + val &= ~MVPP2_PRS_INIT_OFF_MASK(port); + val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); + mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); +} + +/* Default flow entries initialization for all ports */ +static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Set flow ID*/ + mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_hw_write(priv, &pe); + } +} + +/* Set default entry for Marvell Header field */ +static void mvpp2_prs_mh_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + pe.index = MVPP2_PE_MH_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); + mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set default entires (place holder) for promiscuous, non-promiscuous and + * multicast MAC addresses + */ +static void mvpp2_prs_mac_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Non-promiscuous mode for all ports - DROP unknown packets */ + pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); + + /* Create dummy entries for drop all and promiscuous modes */ + mvpp2_prs_mac_drop_all_set(priv, 0, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); +} + +/* Set default entries for various types of dsa packets */ +static void mvpp2_prs_dsa_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + /* None tagged EDSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_EDSA); + + /* Tagged EDSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* None tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_DSA); + + /* Tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* None tagged EDSA ethertype entry - place holder*/ + 
mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + + /* Tagged EDSA ethertype entry - place holder*/ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* None tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + + /* Tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* Set default entry, in case DSA or EDSA tag not found */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = MVPP2_PE_DSA_DEFAULT; + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + + /* Shift 0 bytes */ + mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + + /* Clear all sram ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Initialize parser entries for VID filtering */ +static void mvpp2_prs_vid_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 4 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vid entry for extended DSA*/ + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, + MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Match basic ethertypes */ +static int mvpp2_prs_etype_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Ethertype: PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); + + mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + 
priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: ARP */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: LBTD */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); 
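The entry just above keys not only on ethertype 0x0800 but also on the first byte of the IP header (offset MVPP2_ETH_TYPE_LEN in the key), which packs the version and IHL nibbles; matching both nibbles selects headers whose IHL is exactly 5 (a 20-byte header, i.e. no options), while the follow-up "with options" entry masks only the version nibble. A small sketch of that distinction follows; the 0x40/0x05 values and 0xf0/0x0f masks are assumed stand-ins for MVPP2_PRS_IPV4_HEAD/MVPP2_PRS_IPV4_IHL and their masks, which are defined in a header not shown here.

/* Illustrative sketch, not part of the patch: what the version/IHL
 * match in the two IPv4 ethertype entries distinguishes. The constants
 * are hypothetical stand-ins; only the nibble layout matters.
 */
#include <stdint.h>
#include <stdio.h>

#define IPV4_HEAD	0x40	/* version nibble: 4 */
#define IPV4_HEAD_MASK	0xf0
#define IPV4_IHL	0x05	/* IHL nibble: 5 x 32-bit words */
#define IPV4_IHL_MASK	0x0f

/* TCAM-style match of one byte against a value and its enable mask */
static int tcam_byte_match(uint8_t byte, uint8_t val, uint8_t mask)
{
	return (byte & mask) == (val & mask);
}

int main(void)
{
	uint8_t no_opts = 0x45;	/* version 4, IHL 5: 20-byte header      */
	uint8_t opts    = 0x46;	/* version 4, IHL 6: 24 bytes, options   */

	/* "IPv4 without options" entry: both nibbles must match */
	printf("0x45 vs w/o-options entry: %d\n",
	       tcam_byte_match(no_opts, IPV4_HEAD | IPV4_IHL,
			       IPV4_HEAD_MASK | IPV4_IHL_MASK));	/* 1 */
	printf("0x46 vs w/o-options entry: %d\n",
	       tcam_byte_match(opts, IPV4_HEAD | IPV4_IHL,
			       IPV4_HEAD_MASK | IPV4_IHL_MASK));	/* 0 */

	/* "IPv4 with options" entry: only the version nibble is checked */
	printf("0x46 vs with-options entry: %d\n",
	       tcam_byte_match(opts, IPV4_HEAD, IPV4_HEAD_MASK));	/* 1 */
	return 0;
}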
+ mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Clear tcam data before updating */ + pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; + pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD, + MVPP2_PRS_IPV4_HEAD_MASK); + + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv6 without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); + + /* Skip DIP of IPV6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + + MVPP2_MAX_L3_ADDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = MVPP2_PE_ETH_TYPE_UN; + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset even it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Configure vlan entries and detect up to 2 successive VLAN tags. 
+ * Possible options: + * 0x8100, 0x88A8 + * 0x8100, 0x8100 + * 0x8100 + * 0x88A8 + */ +static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + /* Double VLAN: 0x8100, 0x88A8 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Double VLAN: 0x8100, 0x8100 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x88a8 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x8100 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Set default double vlan entry */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_DBL; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Clear ai for next iterations */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_DBL_VLAN_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vlan none entry */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_NONE; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for PPPoE ethertype */ +static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* IPv4 over PPPoE with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IP); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv4 over PPPoE without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + /* Clear ri 
before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv6 over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IPv6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* Non-IP over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + /* Set L3 offset even if it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv4 */ +static int mvpp2_prs_ip4_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + /* Set entries for TCP, UDP and IGMP over IPv4 */ + err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 Broadcast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); + if (err) + return err; + + /* IPv4 Multicast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Default IPv4 entry for unknown protocols */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_PROTO_UN; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* 
Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv4 entry for unicast address */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_ADDR_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv6 */ +static int mvpp2_prs_ip6_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid, err; + + /* Set entries for TCP, UDP and ICMP over IPv6 */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */ + /* Result Info: UDF7=1, DS lite */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, + MVPP2_PRS_RI_UDF7_IP6_LITE, + MVPP2_PRS_RI_UDF7_MASK); + if (err) + return err; + + /* IPv6 multicast */ + err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Entry for checking hop limit */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | + MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_L3_PROTO_MASK | + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown protocols */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + /* Set L4 offset relatively to our current place */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 
MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown ext protocols */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, + MVPP2_PRS_IPV6_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unicast address */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_ADDR_UN; + + /* Finished: go to IPv6 again */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPV6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Find tcam entry with matched pair */ +static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, + u16 mask) +{ + unsigned char byte[2], enable[2]; + struct mvpp2_prs_entry pe; + u16 rvid, rmask; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VID */ + for (tid = MVPP2_PE_VID_FILT_RANGE_START; + tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + rmask = ((enable[0] & 0xf) << 8) + enable[1]; + + if (rvid != vid || rmask != mask) + continue; + + return tid; + } + + return -ENOENT; +} + +/* Write parser entry for VID filtering */ +int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) +{ + unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + + port->id * MVPP2_PRS_VLAN_FILT_MAX; + unsigned int mask = 0xfff, reg_val, shift; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this already exist */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + /* No such entry */ + if (tid < 0) { + + /* Go through all entries from first to last in vlan range */ + tid = mvpp2_prs_tcam_first_free(priv, vid_start, + vid_start + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY); + + /* There isn't room for a new VID filter */ + if (tid < 0) + return tid; + + 
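mvpp2_prs_vid_entry_add() above gives every port a private window inside the global VID-filter TCAM range: vid_start = MVPP2_PE_VID_FILT_RANGE_START + port->id * MVPP2_PRS_VLAN_FILT_MAX, and mvpp2_prs_tcam_first_free() is then confined to [vid_start, vid_start + MVPP2_PRS_VLAN_FILT_MAX_ENTRY]. A tiny sketch of that arithmetic follows; the constant values are hypothetical, only the per-port slicing is the point.

/* Illustrative sketch, not part of the patch: how each port gets its
 * own slice of the VID-filter TCAM range. The three constants are
 * made-up stand-ins for MVPP2_PE_VID_FILT_RANGE_START,
 * MVPP2_PRS_VLAN_FILT_MAX and MVPP2_PRS_VLAN_FILT_MAX_ENTRY.
 */
#include <stdio.h>

#define VID_FILT_RANGE_START	55	/* hypothetical first VID-filter tid */
#define VLAN_FILT_MAX		11	/* hypothetical per-port window size */
#define VLAN_FILT_MAX_ENTRY	(VLAN_FILT_MAX - 2) /* hypothetical: room
						      * for a guard/default
						      * entry per port */

int main(void)
{
	int port_id;

	for (port_id = 0; port_id < 4; port_id++) {
		int vid_start = VID_FILT_RANGE_START +
				port_id * VLAN_FILT_MAX;

		/* mvpp2_prs_tcam_first_free() may only pick a tid inside
		 * this window, so ports never steal each other's slots.
		 */
		printf("port %d: VID filters use tids %d..%d\n",
		       port_id, vid_start,
		       vid_start + VLAN_FILT_MAX_ENTRY);
	}
	return 0;
}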
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Enable the current port */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Set match on VID */ + mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Write parser entry for VID filtering */ +void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) +{ + struct mvpp2 *priv = port->priv; + int tid; + + /* Scan TCAM and see if entry with this already exist */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); + + /* No such entry */ + if (tid < 0) + return; + + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; +} + +/* Remove all existing VID filters on this port */ +void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (priv->prs_shadow[tid].valid) + mvpp2_prs_vid_entry_remove(port, tid); + } +} + +/* Remove VID filering entry for this port */ +void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + + /* Invalidate the guard entry */ + mvpp2_prs_hw_inv(priv, tid); + + priv->prs_shadow[tid].valid = false; +} + +/* Add guard entry that drops packets when no VID is matched on this port */ +void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + unsigned int reg_val, shift; + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[tid].valid) + return; + + memset(&pe, 0, sizeof(pe)); + + pe.index = tid; + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Drop VLAN packets that don't belong to any VIDs on this port */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Parser default initialization */ +int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + int err, index, i; + + /* Enable tcam table */ + mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); + + /* Clear all tcam and sram entries */ + for (index = 0; index < 
MVPP2_PRS_TCAM_SRAM_SIZE; index++) { + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); + + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); + } + + /* Invalidate all tcam entries */ + for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) + mvpp2_prs_hw_inv(priv, index); + + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + /* Always start from lookup = 0 */ + for (index = 0; index < MVPP2_MAX_PORTS; index++) + mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, + MVPP2_PRS_PORT_LU_MAX, 0); + + mvpp2_prs_def_flow_init(priv); + + mvpp2_prs_mh_init(priv); + + mvpp2_prs_mac_init(priv); + + mvpp2_prs_dsa_init(priv); + + mvpp2_prs_vid_init(priv); + + err = mvpp2_prs_etype_init(priv); + if (err) + return err; + + err = mvpp2_prs_vlan_init(pdev, priv); + if (err) + return err; + + err = mvpp2_prs_pppoe_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip6_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip4_init(priv); + if (err) + return err; + + return 0; +} + +/* Compare MAC DA with tcam entry data */ +static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, + const u8 *da, unsigned char *mask) +{ + unsigned char tcam_byte, tcam_mask; + int index; + + for (index = 0; index < ETH_ALEN; index++) { + mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); + if (tcam_mask != mask[index]) + return false; + + if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) + return false; + } + + return true; +} + +/* Find tcam entry with matched pair */ +static int +mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, + unsigned char *mask, int udf_type) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entires with MVPP2_PRS_LU_MAC */ + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned int entry_pmap; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != udf_type)) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); + + if (mvpp2_prs_mac_range_equals(&pe, da, mask) && + entry_pmap == pmap) + return tid; + } + + return -ENOENT; +} + +/* Update parser's mac da entry */ +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct mvpp2 *priv = port->priv; + unsigned int pmap, len, ri; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this already exist */ + tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, + MVPP2_PRS_UDF_MAC_DEF); + + /* No such entry */ + if (tid < 0) { + if (!add) + return 0; + + /* Create new TCAM entry */ + /* Go through the all entries from first to last */ + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_MAC_RANGE_START, + MVPP2_PE_MAC_RANGE_END); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port->id, add); + + /* Invalidate the entry if no 
ports are left enabled */ + pmap = mvpp2_prs_tcam_port_map_get(&pe); + if (pmap == 0) { + if (add) + return -EINVAL; + + mvpp2_prs_hw_inv(priv, pe.index); + priv->prs_shadow[pe.index].valid = false; + return 0; + } + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); + + /* Set match on DA */ + len = ETH_ALEN; + while (len--) + mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); + + /* Set result info bits */ + if (is_broadcast_ether_addr(da)) { + ri = MVPP2_PRS_RI_L2_BCAST; + } else if (is_multicast_ether_addr(da)) { + ri = MVPP2_PRS_RI_L2_MCAST; + } else { + ri = MVPP2_PRS_RI_L2_UCAST; + + if (ether_addr_equal(da, port->dev->dev_addr)) + ri |= MVPP2_PRS_RI_MAC_ME_MASK; + } + + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table and hw entry */ + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) +{ + struct mvpp2_port *port = netdev_priv(dev); + int err; + + /* Remove old parser entry */ + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); + if (err) + return err; + + /* Add new parser entry */ + err = mvpp2_prs_mac_da_accept(port, da, true); + if (err) + return err; + + /* Set addr in the device */ + ether_addr_copy(dev->dev_addr, da); + + return 0; +} + +void mvpp2_prs_mac_del_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int index, tid; + + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; + + /* Read mac addr from entry */ + for (index = 0; index < ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + /* Special cases : Don't remove broadcast and port's own + * address + */ + if (is_broadcast_ether_addr(da) || + ether_addr_equal(da, port->dev->dev_addr)) + continue; + + /* Remove entry from TCAM */ + mvpp2_prs_mac_da_accept(port, da, false); + } +} + +int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) +{ + switch (type) { + case MVPP2_TAG_TYPE_EDSA: + /* Add port to EDSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + /* Remove port from DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + break; + + case MVPP2_TAG_TYPE_DSA: + /* Add port to DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + /* Remove port from EDSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + 
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + case MVPP2_TAG_TYPE_MH: + case MVPP2_TAG_TYPE_NONE: + /* Remove port form EDSA and DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + default: + if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) + return -EINVAL; + } + + return 0; +} + +/* Set prs flow for the port */ +int mvpp2_prs_def_flow(struct mvpp2_port *port) +{ + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_flow_find(port->priv, port->id); + + /* Such entry not exist */ + if (tid < 0) { + /* Go through the all entires from last to first */ + tid = mvpp2_prs_tcam_first_free(port->priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Set flow ID*/ + mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table */ + mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); + } else { + mvpp2_prs_init_from_hw(port->priv, &pe, tid); + } + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); + mvpp2_prs_hw_write(port->priv, &pe); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h new file mode 100644 index 000000000000..22fbbc4c8b28 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -0,0 +1,314 @@ +/* + * Header Parser definitions for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ +#include +#include + +#include "mvpp2.h" + +#ifndef _MVPP2_PRS_H_ +#define _MVPP2_PRS_H_ + +/* Parser constants */ +#define MVPP2_PRS_TCAM_SRAM_SIZE 256 +#define MVPP2_PRS_TCAM_WORDS 6 +#define MVPP2_PRS_SRAM_WORDS 4 +#define MVPP2_PRS_FLOW_ID_SIZE 64 +#define MVPP2_PRS_FLOW_ID_MASK 0x3f +#define MVPP2_PRS_TCAM_ENTRY_INVALID 1 +#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) +#define MVPP2_PRS_IPV4_HEAD 0x40 +#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 +#define MVPP2_PRS_IPV4_MC 0xe0 +#define MVPP2_PRS_IPV4_MC_MASK 0xf0 +#define MVPP2_PRS_IPV4_BC_MASK 0xff +#define MVPP2_PRS_IPV4_IHL 0x5 +#define MVPP2_PRS_IPV4_IHL_MASK 0xf +#define MVPP2_PRS_IPV6_MC 0xff +#define MVPP2_PRS_IPV6_MC_MASK 0xff +#define MVPP2_PRS_IPV6_HOP_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f +#define MVPP2_PRS_DBL_VLANS_MAX 100 +#define MVPP2_PRS_CAST_MASK BIT(0) +#define MVPP2_PRS_MCAST_VAL BIT(0) +#define MVPP2_PRS_UCAST_VAL 0x0 + +/* Tcam structure: + * - lookup ID - 4 bits + * - port ID - 1 byte + * - additional information - 1 byte + * - header data - 8 bytes + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 
+ */ +#define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_PORT_MASK 0xff +#define MVPP2_PRS_LU_MASK 0xf +#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ + (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) +#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ + (((offs) * 2) - ((offs) % 2) + 2) +#define MVPP2_PRS_TCAM_AI_BYTE 16 +#define MVPP2_PRS_TCAM_PORT_BYTE 17 +#define MVPP2_PRS_TCAM_LU_BYTE 20 +#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) +#define MVPP2_PRS_TCAM_INV_WORD 5 + +#define MVPP2_PRS_VID_TCAM_BYTE 2 + +/* TCAM range for unicast and multicast filtering. We have 25 entries per port, + * with 4 dedicated to UC filtering and the rest to multicast filtering. + * Additionnally we reserve one entry for the broadcast address, and one for + * each port's own address. + */ +#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25 +#define MVPP2_PRS_MAC_RANGE_SIZE 80 + +/* Number of entries per port dedicated to UC and MC filtering */ +#define MVPP2_PRS_MAC_UC_FILT_MAX 4 +#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \ + MVPP2_PRS_MAC_UC_FILT_MAX) + +/* There is a TCAM range reserved for VLAN filtering entries, range size is 33 + * 10 VLAN ID filter entries per port + * 1 default VLAN filter entry per port + * It is assumed that there are 3 ports for filter, not including loopback port + */ +#define MVPP2_PRS_VLAN_FILT_MAX 11 +#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33 + +#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2) +#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1) + +/* Tcam entries ID */ +#define MVPP2_PE_DROP_ALL 0 +#define MVPP2_PE_FIRST_FREE_TID 1 + +/* MAC filtering range */ +#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1) +#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \ + MVPP2_PRS_MAC_RANGE_SIZE + 1) +/* VLAN filtering range */ +#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) +#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ + MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) +#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) +#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) +#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) +#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27) +#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22) +#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21) +#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20) +#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19) +#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) +#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) +#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) +#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) +#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) +#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13) +#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12) +#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11) +#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10) +#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9) +#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8) +#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) +#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) +#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) +/* reserved */ +#define MVPP2_PE_MAC_MC_PROMISCUOUS 
(MVPP2_PRS_TCAM_SRAM_SIZE - 3) +#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) +#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) + +#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \ + ((port) * MVPP2_PRS_VLAN_FILT_MAX)) +#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY) +/* Index of default vid filter for given port */ +#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY) + +/* Sram structure + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). + */ +#define MVPP2_PRS_SRAM_RI_OFFS 0 +#define MVPP2_PRS_SRAM_RI_WORD 0 +#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 +#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 +#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 +#define MVPP2_PRS_SRAM_SHIFT_OFFS 64 +#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_UDF_OFFS 73 +#define MVPP2_PRS_SRAM_UDF_BITS 8 +#define MVPP2_PRS_SRAM_UDF_MASK 0xff +#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 +#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 +#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 +#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 +#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 +#define MVPP2_PRS_SRAM_AI_OFFS 90 +#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 +#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 +#define MVPP2_PRS_SRAM_AI_MASK 0xff +#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 +#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf +#define MVPP2_PRS_SRAM_LU_DONE_BIT 110 +#define MVPP2_PRS_SRAM_LU_GEN_BIT 111 + +/* Sram result info bits assignment */ +#define MVPP2_PRS_RI_MAC_ME_MASK 0x1 +#define MVPP2_PRS_RI_DSA_MASK 0x2 +#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_NONE 0x0 +#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) +#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) +#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 +#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) +#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_UCAST 0x0 +#define MVPP2_PRS_RI_L2_MCAST BIT(9) +#define MVPP2_PRS_RI_L2_BCAST BIT(10) +#define MVPP2_PRS_RI_PPPOE_MASK 0x800 +#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_UN 0x0 +#define MVPP2_PRS_RI_L3_IP4 BIT(12) +#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) +#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) +#define MVPP2_PRS_RI_L3_IP6 BIT(14) +#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) +#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_UCAST 0x0 +#define MVPP2_PRS_RI_L3_MCAST BIT(15) +#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 +#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) +#define MVPP2_PRS_RI_UDF3_MASK 0x300000 +#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) +#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 +#define MVPP2_PRS_RI_L4_TCP BIT(22) +#define MVPP2_PRS_RI_L4_UDP BIT(23) +#define 
MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) +#define MVPP2_PRS_RI_UDF7_MASK 0x60000000 +#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) +#define MVPP2_PRS_RI_DROP_MASK 0x80000000 + +/* Sram additional info bits assignment */ +#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) +#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) +#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) +#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) +#define MVPP2_PRS_SINGLE_VLAN_AI 0 +#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) +#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0) + +/* DSA/EDSA type */ +#define MVPP2_PRS_TAGGED true +#define MVPP2_PRS_UNTAGGED false +#define MVPP2_PRS_EDSA true +#define MVPP2_PRS_DSA false + +/* MAC entries, shadow udf */ +enum mvpp2_prs_udf { + MVPP2_PRS_UDF_MAC_DEF, + MVPP2_PRS_UDF_MAC_RANGE, + MVPP2_PRS_UDF_L2_DEF, + MVPP2_PRS_UDF_L2_DEF_COPY, + MVPP2_PRS_UDF_L2_USER, +}; + +/* Lookup ID */ +enum mvpp2_prs_lookup { + MVPP2_PRS_LU_MH, + MVPP2_PRS_LU_MAC, + MVPP2_PRS_LU_DSA, + MVPP2_PRS_LU_VLAN, + MVPP2_PRS_LU_VID, + MVPP2_PRS_LU_L2, + MVPP2_PRS_LU_PPPOE, + MVPP2_PRS_LU_IP4, + MVPP2_PRS_LU_IP6, + MVPP2_PRS_LU_FLOWS, + MVPP2_PRS_LU_LAST, +}; + +union mvpp2_prs_tcam_entry { + u32 word[MVPP2_PRS_TCAM_WORDS]; + u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; +}; + +union mvpp2_prs_sram_entry { + u32 word[MVPP2_PRS_SRAM_WORDS]; + u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; +}; + +struct mvpp2_prs_entry { + u32 index; + union mvpp2_prs_tcam_entry tcam; + union mvpp2_prs_sram_entry sram; +}; + +struct mvpp2_prs_shadow { + bool valid; + bool finish; + + /* Lookup ID */ + int lu; + + /* User defined offset */ + int udf; + + /* Result info */ + u32 ri; + u32 ri_mask; +}; + +int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv); + +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add); + +int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type); + +int mvpp2_prs_def_flow(struct mvpp2_port *port); + +void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port); + +void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port); + +int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid); + +void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid); + +void mvpp2_prs_vid_remove_all(struct mvpp2_port *port); + +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add); + +void mvpp2_prs_mac_del_all(struct mvpp2_port *port); + +int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da); + +#endif -- cgit v1.2.3 From 6c251711b37ff14e2507bbc2401ac3ef0935ceb1 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 1 Jun 2018 17:52:01 +0100 Subject: net: hns3: Disable vf vlan filter when vf vlan table is full This is only 128 entries for hardware's vf vlan table, when the vf table is full, the firmware will disable the vf vlan filter and return a resp_code of HCLGE_VF_VLAN_NO_ENTRY to driver. This patch checks the if resp_code from firmware is HCLGE_VF_VLAN_NO_ENTRY, if yes, then print a warning and return ok to the caller. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 69166858a6dc..4ca53189d48d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4525,9 +4525,16 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, } if (!is_kill) { +#define HCLGE_VF_VLAN_NO_ENTRY 2 if (!req0->resp_code || req0->resp_code == 1) return 0; + if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { + dev_warn(&hdev->pdev->dev, + "vf vlan table is full, vf vlan filter is disabled\n"); + return 0; + } + dev_err(&hdev->pdev->dev, "Add vf vlan filter fail, ret =%d.\n", req0->resp_code); -- cgit v1.2.3 From 3b75c3df599d0068b382ef1f22396dc5d48c5a74 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Fri, 1 Jun 2018 17:52:02 +0100 Subject: net: hns3: Add support for IFF_ALLMULTI flag This patch adds support for IFF_ALLMULTI flag to HNS3 PF and VF driver. Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 3 ++- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6 ++++-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 6 ++++-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 5 +++-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 11 +++++++---- 6 files changed, 21 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index f250c592a218..e8244a51ce71 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -316,7 +316,8 @@ struct hnae3_ae_ops { int (*set_loopback)(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en); - void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en); + void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc); int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); void (*get_pauseparam)(struct hnae3_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 05290129793f..0a6876a9b6d4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -415,9 +415,11 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) if (h->ae_algo->ops->set_promisc_mode) { if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, 1); + h->ae_algo->ops->set_promisc_mode(h, true, true); + else if (netdev->flags & IFF_ALLMULTI) + h->ae_algo->ops->set_promisc_mode(h, false, true); else - h->ae_algo->ops->set_promisc_mode(h, 0); + h->ae_algo->ops->set_promisc_mode(h, false, false); } if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) netdev_err(netdev, "sync uc address fail\n"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 8f8cc2413656..40c0425b4023 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -95,7 +95,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) if (ret) return ret; - 
h->ae_algo->ops->set_promisc_mode(h, en); + h->ae_algo->ops->set_promisc_mode(h, en, en); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 4ca53189d48d..18027bc543c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3580,13 +3580,15 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, param->vf_id = vport_id; } -static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) +static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; struct hclge_promisc_param param; - hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); + hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, + vport->vport_id); hclge_cmd_set_promisc_mode(hdev, ¶m); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 31f3d9a43d8d..d299805c430b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -190,11 +190,12 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *req) { - bool en = req->msg[1] ? true : false; + bool en_uc = req->msg[1] ? true : false; + bool en_mc = req->msg[2] ? true : false; struct hclge_promisc_param param; /* always enable broadcast promisc bit */ - hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); + hclge_promisc_param_init(¶m, en_uc, en_mc, true, vport->vport_id); return hclge_cmd_set_promisc_mode(vport->back, ¶m); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 266cdcba506e..e28e0db0db5f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -654,7 +654,8 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) return 0; } -static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) +static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, + bool en_uc_pmc, bool en_mc_pmc) { struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; @@ -664,7 +665,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; - req->msg[1] = en; + req->msg[1] = en_uc_pmc ? 1 : 0; + req->msg[2] = en_mc_pmc ? 
1 : 0; status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -674,11 +676,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) return status; } -static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en) +static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, + bool en_uc_pmc, bool en_mc_pmc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - hclgevf_cmd_set_promisc_mode(hdev, en); + hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); } static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, -- cgit v1.2.3 From 5ec2a51ef8e2cb1fd1f04a8a7170001ab94d36fe Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Fri, 1 Jun 2018 17:52:03 +0100 Subject: net: hns3: Add repeat address checking for setting mac address Add checking for new mac address. It doesn't need to config the mac vlan table if it's already in use. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 0a6876a9b6d4..fe54564abdd5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1132,6 +1132,12 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) return -EADDRNOTAVAIL; + if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", + mac_addr->sa_data); + return 0; + } + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); if (ret) { netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); -- cgit v1.2.3 From c7fc8fb6193428e107a4d51333c158bdc226b26c Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Fri, 1 Jun 2018 17:52:04 +0100 Subject: net: hns3: Fix setting mac address error When doing function reset or insmod hns3 dirver after rmmod, the entries of mac vlan table are not cleared, which may cause init mac address failed. This patch fixes it by clearing the old mac address when doing function reset or rmmod hns3 driver. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index fe54564abdd5..235eea1393bb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3007,6 +3007,15 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init) } +static void hns3_uninit_mac_addr(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->rm_uc_addr) + h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr); +} + static void hns3_nic_set_priv_ops(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -3135,6 +3144,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) priv->ring_data = NULL; + hns3_uninit_mac_addr(netdev); + free_netdev(netdev); } @@ -3451,6 +3462,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) priv->ring_data = NULL; + hns3_uninit_mac_addr(netdev); + return ret; } -- cgit v1.2.3 From f5be79673fc4c925708c99ec37d77e0a2c3cd30b Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 1 Jun 2018 17:52:05 +0100 Subject: net: hns3: Fix for service_task not running problem after resetting When hclge_ae_stop is called during resetting, it will cancel the service_task by calling cancel_work_sync, which may cause the service_task to exit without clearing HCLGE_STATE_SERVICE_SCHED bit. If this happens, the service_task will never run again. This patch fixes this problem by clearing it after calling cancel_work_sync in hclge_ae_stop. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1 + drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 18027bc543c1..746987f4ebbc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3748,6 +3748,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle) del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) return; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index e28e0db0db5f..5d28052b3ea5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1337,6 +1337,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) hclgevf_reset_tqp_stats(handle); del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); hclgevf_update_link_status(hdev, 0); } -- cgit v1.2.3 From cd8c5c269b1d807028e939293002c989e7c07e51 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 1 Jun 2018 17:52:06 +0100 Subject: net: hns3: Fix for hclge_reset running repeatly problem When hardware sends the HCLGE_VECTOR0_EVENT_RST event through hclge_misc_irq_handle, currently driver enables misc_vector in the interrupt handle, and hardware generates the same interrupt for the same reset event again and again until the reset is complete, which causes hclge_reset running repeatly problem. This patch fixes by enabling the misc_vector after reset is complete. Fixes: 4ed340ab8f49 ("net: hns3: Add reset process in hclge_main") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 40 ++++++++++++++++++---- 1 file changed, 34 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 746987f4ebbc..fb44b1ec4669 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2587,9 +2587,11 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) break; } - /* we should clear the source of interrupt */ - hclge_clear_event_cause(hdev, event_cause, clearval); - hclge_enable_vector(&hdev->misc_vector, true); + /* clear the source of interrupt if it is not cause by reset */ + if (event_cause != HCLGE_VECTOR0_EVENT_RST) { + hclge_clear_event_cause(hdev, event_cause, clearval); + hclge_enable_vector(&hdev->misc_vector, true); + } return IRQ_HANDLED; } @@ -2777,6 +2779,33 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, return rst_level; } +static void hclge_clear_reset_cause(struct hclge_dev *hdev) +{ + u32 clearval = 0; + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + break; + case HNAE3_GLOBAL_RESET: + clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + break; + case HNAE3_CORE_RESET: + clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); + break; + default: + dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d", + hdev->reset_type); + break; + } + + if (!clearval) + return; + + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); + hclge_enable_vector(&hdev->misc_vector, true); +} + static void hclge_reset(struct hclge_dev *hdev) { /* perform reset of the stack & ae device for a client */ @@ -2789,6 +2818,8 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_reset_ae_dev(hdev->ae_dev); hclge_notify_client(hdev, HNAE3_INIT_CLIENT); rtnl_unlock(); + + hclge_clear_reset_cause(hdev); } else { /* schedule again to check pending resets later */ set_bit(hdev->reset_type, &hdev->reset_pending); @@ -5661,9 +5692,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - /* Enable MISC vector(vector0) */ - hclge_enable_vector(&hdev->misc_vector, true); - dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); -- cgit v1.2.3 From 9617f66867b09b326cc932416be2431c5b91c8d8 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 1 Jun 2018 17:52:07 +0100 Subject: net: hns3: Fix for phy not link up problem after resetting When resetting, phy_state_machine may be accessing the phy through firmware if the phy is not stopped or disconnected, which will cause firemware timeout problem because the firmware is busy processing the reset request. This patch fixes it by disabling the phy when resetting. Fixes: b940aeae0ed6 ("net: hns3: never send command queue message to IMP when reset") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index fb44b1ec4669..58fef5e56831 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3761,9 +3761,6 @@ static int hclge_ae_start(struct hnae3_handle *handle) /* reset tqp stats */ hclge_reset_tqp_stats(handle); - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) - return 0; - ret = hclge_mac_start_phy(hdev); if (ret) return ret; @@ -3781,8 +3778,10 @@ static void hclge_ae_stop(struct hnae3_handle *handle) cancel_work_sync(&hdev->service_task); clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + hclge_mac_stop_phy(hdev); return; + } for (i = 0; i < vport->alloc_tqps; i++) hclge_tqp_enable(hdev, i, 0, false); -- cgit v1.2.3 From f0ad97ac12f0e91cf3846025256ca97845bcfccd Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 1 Jun 2018 17:52:08 +0100 Subject: net: hns3: Add missing break in misc_irq_handle There is a break missing in the switch/case handling in hclge_misc_irq_handle, which causes the log to output uncorrectly. This patch adds the missing break, and change the dev_dbg to dev_warn in order to better catch the error. Fixes: c1a81619d73a ("net: hns3: Add mailbox interrupt handling to PF driver") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 58fef5e56831..19e56896887d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2580,10 +2580,10 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) * mbx messages reported by this interrupt. */ hclge_mbx_task_schedule(hdev); - + break; default: - dev_dbg(&hdev->pdev->dev, - "received unknown or unhandled event of vector0\n"); + dev_warn(&hdev->pdev->dev, + "received unknown or unhandled event of vector0\n"); break; } -- cgit v1.2.3 From 3db084d28dc04e6ebe9123e0d4f6e8ef5a775ff0 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 1 Jun 2018 17:52:09 +0100 Subject: net: hns3: Fix for vxlan tx checksum bug when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and it is udp packet, which has a dest port as the IANA assigned. the hardware is expected to do the checksum offload, but the hardware will not do the checksum offload when udp dest port is 4789. This patch fixes it by doing the checksum in software. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 29 +++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 235eea1393bb..e572804169cd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -655,6 +655,32 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, } } +/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL + * and it is udp packet, which has a dest port as the IANA assigned. + * the hardware is expected to do the checksum offload, but the + * hardware will not do the checksum offload when udp dest port is + * 4789. + */ +static bool hns3_tunnel_csum_bug(struct sk_buff *skb) +{ +#define IANA_VXLAN_PORT 4789 + union { + struct tcphdr *tcp; + struct udphdr *udp; + struct gre_base_hdr *gre; + unsigned char *hdr; + } l4; + + l4.hdr = skb_transport_header(skb); + + if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT))) + return false; + + skb_checksum_help(skb); + + return true; +} + static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, u8 il4_proto, u32 *type_cs_vlan_tso, u32 *ol_type_vlan_len_msec) @@ -743,6 +769,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, HNS3_L4T_TCP); break; case IPPROTO_UDP: + if (hns3_tunnel_csum_bug(skb)) + break; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, -- cgit v1.2.3 From 40cca1c587c1c39fcc7fa1b2c5d315d72361dfe1 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Fri, 1 Jun 2018 17:52:10 +0100 Subject: net: hns3: Optimize the PF's process of updating multicast MAC In the current process, the multicast MAC is added to both MAC_VLAN table and MTA table, this will reduce the utilization of the resource. This patch improves the process of adding multicast MAC address, the new process starts using the MTA table to add multicast MAC after the MAC_VLAN table is full, and the MTA is disable if it is no longer used. Signed-off-by: Xi Wang Reviewed-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1 + drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 138 ++++++++++++++++++--- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 12 +- 4 files changed, 136 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index e8244a51ce71..8acb1d116a02 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -353,6 +353,7 @@ struct hnae3_ae_ops { const unsigned char *addr); int (*rm_mc_addr)(struct hnae3_handle *handle, const unsigned char *addr); + int (*update_mta_status)(struct hnae3_handle *handle); void (*set_tso_stats)(struct hnae3_handle *handle, int enable); void (*update_stats)(struct hnae3_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index e572804169cd..f2b31d278bc9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -423,9 +423,13 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) } if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) netdev_err(netdev, "sync uc address fail\n"); - if (netdev->flags & IFF_MULTICAST) + if (netdev->flags & IFF_MULTICAST) { if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) netdev_err(netdev, "sync mc address fail\n"); + + if (h->ae_algo->ops->update_mta_status) + h->ae_algo->ops->update_mta_status(h); + } } static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 19e56896887d..2a801344eafb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2288,8 +2288,10 @@ static int hclge_mac_init(struct hclge_dev *hdev) struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct hclge_vport *vport; int mtu; int ret; + int i; ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); if (ret) { @@ -2301,7 +2303,6 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; /* Initialize the MTA table work mode */ - hdev->accept_mta_mc = true; hdev->enable_mta = true; hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; @@ -2314,11 +2315,17 @@ static int hclge_mac_init(struct hclge_dev *hdev) return ret; } - ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); - if (ret) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", ret); - return ret; + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->accept_mta_mc = false; + + memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); + ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "set mta filter mode fail ret=%d\n", ret); + return ret; + } } ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); @@ -4005,9 +4012,88 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, return ret; } + if (enable) + set_bit(idx, vport->mta_shadow); + else + clear_bit(idx, vport->mta_shadow); + return 0; } +static int hclge_update_mta_status(struct hnae3_handle *handle) +{ + unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; + 
struct hclge_vport *vport = hclge_get_vport(handle); + struct net_device *netdev = handle->kinfo.netdev; + struct netdev_hw_addr *ha; + u16 tbl_idx; + + memset(mta_status, 0, sizeof(mta_status)); + + /* update mta_status from mc addr list */ + netdev_for_each_mc_addr(ha, netdev) { + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr); + set_bit(tbl_idx, mta_status); + } + + return hclge_update_mta_status_common(vport, mta_status, + 0, HCLGE_MTA_TBL_SIZE, true); +} + +int hclge_update_mta_status_common(struct hclge_vport *vport, + unsigned long *status, + u16 idx, + u16 count, + bool update_filter) +{ + struct hclge_dev *hdev = vport->back; + u16 update_max = idx + count; + u16 check_max; + int ret = 0; + bool used; + u16 i; + + /* setup mta check range */ + if (update_filter) { + i = 0; + check_max = HCLGE_MTA_TBL_SIZE; + } else { + i = idx; + check_max = update_max; + } + + used = false; + /* check and update all mta item */ + for (; i < check_max; i++) { + /* ignore unused item */ + if (!test_bit(i, vport->mta_shadow)) + continue; + + /* if i in update range then update it */ + if (i >= idx && i < update_max) + if (!test_bit(i - idx, status)) + hclge_set_mta_table_item(vport, i, false); + + if (!used && test_bit(i, vport->mta_shadow)) + used = true; + } + + /* no longer use mta, disable it */ + if (vport->accept_mta_mc && update_filter && !used) { + ret = hclge_cfg_func_mta_filter(hdev, + vport->vport_id, + false); + if (ret) + dev_err(&hdev->pdev->dev, + "disable func mta filter fail ret=%d\n", + ret); + else + vport->accept_mta_mc = false; + } + + return ret; +} + static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd *req) { @@ -4275,9 +4361,25 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } - /* Set MTA table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, tbl_idx, true); + /* If mc mac vlan table is full, use MTA table */ + if (status == -ENOSPC) { + if (!vport->accept_mta_mc) { + status = hclge_cfg_func_mta_filter(hdev, + vport->vport_id, + true); + if (status) { + dev_err(&hdev->pdev->dev, + "set mta filter mode fail ret=%d\n", + status); + return status; + } + vport->accept_mta_mc = true; + } + + /* Set MTA table for this MAC address */ + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); + status = hclge_set_mta_table_item(vport, tbl_idx, true); + } return status; } @@ -4297,7 +4399,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd req; enum hclge_cmd_status status; struct hclge_desc desc[3]; - u16 tbl_idx; /* mac addr check */ if (!is_multicast_ether_addr(addr)) { @@ -4326,17 +4427,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } else { - /* This mac addr do not exist, can't delete it */ - dev_err(&hdev->pdev->dev, - "Rm multicast mac addr failed, ret = %d.\n", - status); - return -EIO; + /* Maybe this mac address is in mta table, but it cannot be + * deleted here because an entry of mta represents an address + * range rather than a specific address. the delete action to + * all entries will take effect in update_mta_status called by + * hns3_nic_set_rx_mode. 
+ */ + status = 0; } - /* Set MTB table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, tbl_idx, false); - return status; } @@ -6137,6 +6236,7 @@ static const struct hnae3_ae_ops hclge_ops = { .rm_uc_addr = hclge_rm_uc_addr, .add_mc_addr = hclge_add_mc_addr, .rm_mc_addr = hclge_rm_mc_addr, + .update_mta_status = hclge_update_mta_status, .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, .get_pauseparam = hclge_get_pauseparam, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7fcabdeb4ce9..7488534528cd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -61,6 +61,8 @@ #define HCLGE_RSS_TC_SIZE_6 64 #define HCLGE_RSS_TC_SIZE_7 128 +#define HCLGE_MTA_TBL_SIZE 4096 + #define HCLGE_TQP_RESET_TRY_TIMES 10 #define HCLGE_PHY_PAGE_MDIX 0 @@ -559,7 +561,6 @@ struct hclge_dev { enum hclge_mta_dmac_sel_type mta_mac_sel_type; bool enable_mta; /* Mutilcast filter enable */ - bool accept_mta_mc; /* Whether accept mta filter multicast */ struct hclge_vlan_type_cfg vlan_type_cfg; @@ -620,6 +621,9 @@ struct hclge_vport { struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; + + bool accept_mta_mc; /* whether to accept mta filter multicast */ + unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -637,6 +641,12 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, u8 func_id, bool enable); +int hclge_update_mta_status_common(struct hclge_vport *vport, + unsigned long *status, + u16 idx, + u16 count, + bool update_filter); + struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); int hclge_bind_ring_with_vector(struct hclge_vport *vport, int vector_id, bool en, -- cgit v1.2.3 From 3a678b5806e66d0b75086bf423ecaf80ff0237c7 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Fri, 1 Jun 2018 17:52:11 +0100 Subject: net: hns3: Optimize the VF's process of updating multicast MAC In the update flow of the new PF driver, if a multicast address is in mta table, the VF deletion action will not take effect. This patch adds the VF adaptation according to the new flow of PF'driver. Signed-off-by: Xi Wang Reviewed-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 2 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 58 +++++++++- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 128 ++++++++++++++++++++- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 4 + 4 files changed, 187 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 519e2bd6aa60..be9dc08ccf67 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -47,6 +47,8 @@ enum hclge_mbx_mac_vlan_subcode { HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */ + HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */ + HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */ }; /* below are per-VF vlan cfg subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index d299805c430b..7541cb9b71ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -231,12 +231,51 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, return 0; } +static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport, + u8 *msg, u8 idx, bool is_end) +{ +#define HCLGE_MTA_STATUS_MSG_SIZE 13 +#define HCLGE_MTA_STATUS_MSG_BITS \ + (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) +#define HCLGE_MTA_STATUS_MSG_END_BITS \ + (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS) + unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)]; + u16 tbl_cnt; + u16 tbl_idx; + u8 msg_ofs; + u8 msg_bit; + + tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS : + HCLGE_MTA_STATUS_MSG_BITS; + + /* set msg field */ + msg_ofs = 0; + msg_bit = 0; + memset(status, 0, sizeof(status)); + for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) { + if (msg[msg_ofs] & BIT(msg_bit)) + set_bit(tbl_idx, status); + + msg_bit++; + if (msg_bit == BITS_PER_BYTE) { + msg_bit = 0; + msg_ofs++; + } + } + + return hclge_update_mta_status_common(vport, + status, idx * HCLGE_MTA_STATUS_MSG_BITS, + tbl_cnt, is_end); +} + static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) { const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); struct hclge_dev *hdev = vport->back; + u8 resp_len = 0; + u8 resp_data; int status; if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { @@ -248,6 +287,22 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, bool enable = mbx_req->msg[2]; status = hclge_cfg_func_mta_filter(hdev, func_id, enable); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) { + resp_data = hdev->mta_mac_sel_type; + resp_len = sizeof(u8); + gen_resp = true; + status = 0; + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) { + /* mta status update msg format + * msg[2.6 : 2.0] msg index + * msg[2.7] msg is end + * msg[15 : 3] mta status bits[103 : 0] + */ + bool is_end = (mbx_req->msg[2] & 0x80) ? 
true : false; + + status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3], + mbx_req->msg[2] & 0x7F, + is_end); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", @@ -256,7 +311,8 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, } if (gen_resp) - hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); + hclge_gen_resp_to_vf(vport, mbx_req, status, + &resp_data, resp_len); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 5d28052b3ea5..dd8e8e6718dc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -739,6 +739,126 @@ static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en) msg, 1, false, NULL, 0); } +static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) +{ + u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, + NULL, 0, true, &resp_msg, sizeof(u8)); + + if (ret) { + dev_err(&hdev->pdev->dev, + "Read mta type fail, ret=%d.\n", ret); + return ret; + } + + if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) { + dev_err(&hdev->pdev->dev, + "Read mta type invalid, resp=%d.\n", resp_msg); + return -EINVAL; + } + + hdev->mta_mac_sel_type = resp_msg; + + return 0; +} + +static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev, + const u8 *addr) +{ + u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type; + u16 high_val = addr[1] | (addr[0] << 8); + + return (high_val >> rsh) & 0xfff; +} + +static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev, + unsigned long *status) +{ +#define HCLGEVF_MTA_STATUS_MSG_SIZE 13 +#define HCLGEVF_MTA_STATUS_MSG_BITS \ + (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) +#define HCLGEVF_MTA_STATUS_MSG_END_BITS \ + (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS) + u16 tbl_cnt; + u16 tbl_idx; + u8 msg_cnt; + u8 msg_idx; + int ret; + + msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE, + HCLGEVF_MTA_STATUS_MSG_BITS); + tbl_idx = 0; + msg_idx = 0; + while (msg_cnt--) { + u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1]; + u8 *p = &msg[1]; + u8 msg_ofs; + u8 msg_bit; + + memset(msg, 0, sizeof(msg)); + + /* set index field */ + msg[0] = 0x7F & msg_idx; + + /* set end flag field */ + if (msg_cnt == 0) { + msg[0] |= 0x80; + tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS; + } else { + tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS; + } + + /* set status field */ + msg_ofs = 0; + msg_bit = 0; + while (tbl_cnt--) { + if (test_bit(tbl_idx, status)) + p[msg_ofs] |= BIT(msg_bit); + + tbl_idx++; + + msg_bit++; + if (msg_bit == BITS_PER_BYTE) { + msg_bit = 0; + msg_ofs++; + } + } + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, + msg, sizeof(msg), false, NULL, 0); + if (ret) + break; + + msg_idx++; + } + + return ret; +} + +static int hclgevf_update_mta_status(struct hnae3_handle *handle) +{ + unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)]; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct net_device *netdev = hdev->nic.kinfo.netdev; + struct netdev_hw_addr *ha; + u16 tbl_idx; + + /* clear status */ + memset(mta_status, 0, sizeof(mta_status)); + + /* update status from mc addr list */ + netdev_for_each_mc_addr(ha, netdev) { + tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr); + set_bit(tbl_idx, mta_status); + } + + return 
hclgevf_do_update_mta_status(hdev, mta_status); +} + static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -1669,12 +1789,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* Initialize VF's MTA */ - hdev->accept_mta_mc = true; - ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc); + /* Initialize mta type for this VF */ + ret = hclgevf_cfg_func_mta_type(hdev); if (ret) { dev_err(&hdev->pdev->dev, - "failed(%d) to set mta filter mode\n", ret); + "failed(%d) to initialize MTA type\n", ret); goto err_config; } @@ -1829,6 +1948,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .rm_uc_addr = hclgevf_rm_uc_addr, .add_mc_addr = hclgevf_add_mc_addr, .rm_mc_addr = hclgevf_rm_mc_addr, + .update_mta_status = hclgevf_update_mta_status, .get_stats = hclgevf_get_stats, .update_stats = hclgevf_update_stats, .get_strings = hclgevf_get_strings, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 9763e742e6fb..0656e8e5c5f0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -48,6 +48,9 @@ #define HCLGEVF_RSS_CFG_TBL_NUM \ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE) +#define HCLGEVF_MTA_TBL_SIZE 4096 +#define HCLGEVF_MTA_TYPE_SEL_MAX 4 + /* states of hclgevf device & tasks */ enum hclgevf_states { /* device states */ @@ -152,6 +155,7 @@ struct hclgevf_dev { int *vector_irq; bool accept_mta_mc; /* whether to accept mta filter multicast */ + u8 mta_mac_sel_type; bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ -- cgit v1.2.3 From 4b3e85a52ae4ea516fe297acad32872bc13bf620 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 31 May 2018 18:01:31 +0300 Subject: net/mlx5e: IPOIB, Fix overflowing SQ WQE memset IPoIB WQE size is larger than a single WQEBB. Must not fetch the WQE, and surely not memset it, until it is guaranteed that there are enough WQEBBs available before getting to SQ/frag edge. 
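For readability, the safe ordering the diff below establishes can be sketched as follows (a simplified restatement using the same helpers as the patch, not the patch itself): the WQEBB count is computed and the fragment edge is padded first, and only then is the WQE fetched and written.

	/* Simplified sketch of the corrected ordering (see the diff below
	 * for the actual change): an IPoIB WQE may span several WQEBBs, so
	 * the WQE must not be fetched -- and therefore not memset/written --
	 * before the SQ fragment edge has been handled.
	 */
	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	frag_pi    = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
	if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);	/* pad to the edge */
	}
	mlx5i_sq_fetch_wqe(sq, &wqe, &pi);	/* only now is it safe to touch *wqe */
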
Fixes: 043dc78ecf07 ("net/mlx5e: TX, Use actual WQE size for SQ edge fill") Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index aafd75257fd0..9829ee02de31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -653,8 +653,6 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, int num_dma; __be16 mss; - mlx5i_sq_fetch_wqe(sq, &wqe, &pi); - /* Calc ihs and ds cnt, no writes to wqe yet */ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; if (skb_is_gso(skb)) { @@ -686,10 +684,12 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); - mlx5i_sq_fetch_wqe(sq, &wqe, &pi); } + mlx5i_sq_fetch_wqe(sq, &wqe, &pi); + /* fill wqe */ wi = &sq->db.wqe_info[pi]; cseg = &wqe->ctrl; -- cgit v1.2.3 From c90262f8468acfed573e195de9443007dad27451 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 31 May 2018 18:04:23 +0300 Subject: net/mlx5e: IPOIB, Add a missing skb_pull A call to mlx5e_tx_skb_pull_inline was mistakenly dropped in the cited patch. Get it back. Fixes: 043dc78ecf07 ("net/mlx5e: TX, Use actual WQE size for SQ edge fill") Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 9829ee02de31..725c06221e95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -705,6 +705,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (ihs) { memcpy(eseg->inline_hdr.start, skb_data, ihs); + mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); eseg->inline_hdr.sz = cpu_to_be16(ihs); dseg += ds_cnt_inl; } -- cgit v1.2.3 From 8bfaf07f780683948a3183d3b14a76dfd50bf899 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Tue, 29 May 2018 10:54:47 +0300 Subject: net/mlx5e: Present SW stats when state is not opened The driver can present all SW stats even when the state not opened. Fixed get strings, count and stats to support it. In addition, fix tc2txq to hold a static mapping which doesn't depend on the amount of open channels, and cannot have the same value on two different cells while moving between configurations. Example: - OOB 16 channels - Change to 2 channels, 8 TCs - tc2txq[15][0] == tc2txq[1][7] == 15 This will cause multiple appearances of the same TX index in statistics output. 
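The collision in the example above is easy to verify with a tiny stand-alone program (plain C, purely illustrative; the constants mirror the 16-channel/2-channel/8-TC scenario and are not driver identifiers):

	#include <stdio.h>

	#define MAX_NCH		16	/* channels supported by the profile (OOB) */
	#define OPEN_NCH	2	/* channels currently open */

	int main(void)
	{
		/* old mapping: txq = ch + tc * number_of_open_channels */
		int old_a = 15 + 0 * OPEN_NCH;	/* tc2txq[15][0] */
		int old_b =  1 + 7 * OPEN_NCH;	/* tc2txq[1][7]  */

		/* new mapping: txq = ch + tc * max_nch (static, never collides) */
		int new_a = 15 + 0 * MAX_NCH;
		int new_b =  1 + 7 * MAX_NCH;

		printf("old mapping: %d vs %d\n", old_a, old_b);	/* 15 vs 15 -> collision */
		printf("new mapping: %d vs %d\n", new_a, new_b);	/* 15 vs 113 -> unique   */
		return 0;
	}

With the static mapping every (channel, tc) pair gets a unique TX index regardless of how many channels are currently open, which is what allows the SW stats to be reported while the interface is closed.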
Fixes: 76c3810bade3 ("net/mlx5e: Avoid reset netdev stats on configuration changes") Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 19 +++++++++++++------ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 10 ---------- 2 files changed, 13 insertions(+), 16 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9b19863b059d..adc55de6d4f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2633,15 +2633,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) netdev_set_tc_queue(netdev, tc, nch, 0); } -static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv) +static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) { - struct mlx5e_channel *c; - struct mlx5e_txqsq *sq; + int max_nch = priv->profile->max_nch(priv->mdev); int i, tc; - for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) + for (i = 0; i < max_nch; i++) for (tc = 0; tc < priv->profile->max_tc; tc++) - priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num; + priv->channel_tc2txq[i][tc] = i + tc * max_nch; +} + +static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) +{ + struct mlx5e_channel *c; + struct mlx5e_txqsq *sq; + int i, tc; for (i = 0; i < priv->channels.num; i++) { c = priv->channels.c[i]; @@ -2661,7 +2667,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, priv->channels.num); - mlx5e_build_channels_tx_maps(priv); + mlx5e_build_tx2sq_maps(priv); mlx5e_activate_channels(&priv->channels); write_lock(&priv->stats_lock); priv->channels_active = true; @@ -4446,6 +4452,7 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); + mlx5e_build_tc2txq_maps(priv); mlx5e_vxlan_init(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 776b4d68e156..3b2aed43f660 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -1161,9 +1161,6 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) { int max_nch = priv->profile->max_nch(priv->mdev); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return 0; - return (NUM_RQ_STATS * max_nch) + (NUM_CH_STATS * max_nch) + (NUM_SQ_STATS * max_nch * priv->max_opened_tc); @@ -1175,9 +1172,6 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, int max_nch = priv->profile->max_nch(priv->mdev); int i, j, tc; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return idx; - for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -1187,7 +1181,6 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, for (j = 0; j < NUM_RQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); - /* priv->channel_tc2txq[i][tc] is valid only when device is open */ for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) @@ -1204,9 +1197,6 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, int max_nch = priv->profile->max_nch(priv->mdev); int i, j, tc; - if 
(!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return idx; - for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) data[idx++] = -- cgit v1.2.3 From 6c63efe4cfabf230a8ed4b1d880249875ffdac13 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Tue, 29 May 2018 11:06:31 +0300 Subject: net/mlx5e: Remove redundant active_channels indication Now, when all channels stats are saved regardless of the channel's state {open, closed}, we can safely remove this indication and the stats spin lock which protects it. Fixes: 76c3810bade3 ("net/mlx5e: Avoid reset netdev stats on configuration changes") Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 -- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 ------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 6 ------ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 5 ----- 4 files changed, 20 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 1c04df043e07..372cdf8a496c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -777,8 +777,6 @@ struct mlx5e_priv { struct mutex state_lock; /* Protects Interface state */ struct mlx5e_rq drop_rq; - rwlock_t stats_lock; /* Protects channels SW stats updates */ - bool channels_active; struct mlx5e_channels channels; u32 tisn[MLX5E_MAX_NUM_TC]; struct mlx5e_rqt indir_rqt; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index adc55de6d4f4..dd119bad102d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2669,9 +2669,6 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_build_tx2sq_maps(priv); mlx5e_activate_channels(&priv->channels); - write_lock(&priv->stats_lock); - priv->channels_active = true; - write_unlock(&priv->stats_lock); netif_tx_start_all_queues(priv->netdev); if (MLX5_VPORT_MANAGER(priv->mdev)) @@ -2693,9 +2690,6 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) */ netif_tx_stop_all_queues(priv->netdev); netif_tx_disable(priv->netdev); - write_lock(&priv->stats_lock); - priv->channels_active = false; - write_unlock(&priv->stats_lock); mlx5e_deactivate_channels(&priv->channels); } @@ -4269,7 +4263,6 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, profile->max_nch(mdev), netdev->mtu); mutex_init(&priv->state_lock); - rwlock_init(&priv->stats_lock); INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index de6364125f0f..1fb4835eac72 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -130,10 +130,6 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) struct mlx5e_sq_stats *sq_stats; int i, j; - read_lock(&priv->stats_lock); - if (!priv->channels_active) - goto out; - memset(s, 0, sizeof(*s)); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -150,8 +146,6 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) s->tx_bytes += sq_stats->bytes; } } -out: - read_unlock(&priv->stats_lock); } static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 3b2aed43f660..697dc7397ba2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -114,9 +114,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) int i; memset(s, 0, sizeof(*s)); - read_lock(&priv->stats_lock); - if (!priv->channels_active) - goto out; for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { struct mlx5e_channel_stats *channel_stats = @@ -177,8 +174,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) } memcpy(&priv->stats.sw, s, sizeof(*s)); -out: - read_unlock(&priv->stats_lock); } static const struct counter_desc q_stats_desc[] = { -- cgit v1.2.3 From 93edcb3a75897a60ec294851043088f59f4475e1 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Thu, 3 May 2018 12:40:30 +0300 Subject: net/mlx5e: Increase aRFS flow tables size Increase the aRFS flow table size to 64k so it could contain up to 64k different streams. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index f64b5e78519b..75e4308ba786 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -213,7 +213,7 @@ out: } #define MLX5E_ARFS_NUM_GROUPS 2 -#define MLX5E_ARFS_GROUP1_SIZE BIT(12) +#define MLX5E_ARFS_GROUP1_SIZE (BIT(16) - 1) #define MLX5E_ARFS_GROUP2_SIZE BIT(0) #define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\ MLX5E_ARFS_GROUP2_SIZE) -- cgit v1.2.3 From 250a42b6a764a7954a7a7a137bc71883f05657cb Mon Sep 17 00:00:00 2001 From: Adi Nissim Date: Sun, 1 Apr 2018 16:54:27 +0300 Subject: net/mlx5e: Support configurable MTU for vport representors The representor MTU was hard coded to 1500 bytes. Allow setting arbitrary MTU values up to the max supported by the FW. 
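In short, the representor now derives its MTU bounds from the firmware instead of the former fixed 1500 bytes; the relevant lines from the diff below are reproduced here for readability (illustrative excerpt, not new code):

	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);

The ndo_change_mtu handler itself is shared with the NIC profile through the new change_hw_mtu_cb callback, so the representor reuses mlx5e_change_mtu without the NIC-specific port-MTU programming.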
Signed-off-by: Adi Nissim Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 ++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 14 ++++++++++---- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 14 ++++++++++++++ 3 files changed, 28 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 372cdf8a496c..a23fa47fbbcc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1102,6 +1102,10 @@ void mlx5e_update_stats_work(struct work_struct *work); int mlx5e_bits_invert(unsigned long a, int size); +typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv); +int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, + change_hw_mtu_cb set_mtu_cb); + /* ethtool helpers */ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dd119bad102d..ab7b2a4e6edc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3495,7 +3495,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, return features; } -static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) +int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, + change_hw_mtu_cb set_mtu_cb) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels new_channels = {}; @@ -3522,7 +3523,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) if (!reset) { params->sw_mtu = new_mtu; - mlx5e_set_dev_port_mtu(priv); + set_mtu_cb(priv); netdev->mtu = params->sw_mtu; goto out; } @@ -3531,7 +3532,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) if (err) goto out; - mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu); + mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb); netdev->mtu = new_channels.params.sw_mtu; out: @@ -3539,6 +3540,11 @@ out: return err; } +static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu) +{ + return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu); +} + int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) { struct hwtstamp_config config; @@ -4033,7 +4039,7 @@ static const struct net_device_ops mlx5e_netdev_ops = { .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, .ndo_set_features = mlx5e_set_features, .ndo_fix_features = mlx5e_fix_features, - .ndo_change_mtu = mlx5e_change_mtu, + .ndo_change_mtu = mlx5e_change_nic_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 1fb4835eac72..8ab4c96b7f7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -900,6 +900,11 @@ static const struct switchdev_ops mlx5e_rep_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; +int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu) +{ + return mlx5e_change_mtu(netdev, new_mtu, NULL); +} + static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_open = mlx5e_rep_open, .ndo_stop = mlx5e_rep_close, @@ -909,6 +914,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { 
.ndo_get_stats64 = mlx5e_rep_get_stats, .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, + .ndo_change_mtu = mlx5e_change_rep_mtu, }; static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, @@ -935,6 +941,10 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, static void mlx5e_build_rep_netdev(struct net_device *netdev) { + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 max_mtu; + netdev->netdev_ops = &mlx5e_netdev_ops_rep; netdev->watchdog_timeo = 15 * HZ; @@ -947,6 +957,10 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_TC; eth_hw_addr_random(netdev); + + netdev->min_mtu = ETH_MIN_MTU; + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); } static void mlx5e_init_rep(struct mlx5_core_dev *mdev, -- cgit v1.2.3 From 98db16bab59fed3834cd0c0f4a3dbca57a61fad5 Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Tue, 29 May 2018 16:39:04 -0700 Subject: net/mlx5: FPGA, Handle QP error event Add handlers for this event to perform graceful teardown of the device. Signed-off-by: Ilan Tayari Signed-off-by: Adi Nissim Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/fpga/core.c | 28 ++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 26caa0475985..436a8136f26f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -50,6 +50,11 @@ static const char *const mlx5_fpga_error_strings[] = { "Temperature Critical", }; +static const char * const mlx5_fpga_qp_error_strings[] = { + "Null Syndrome", + "Retry Counter Expired", + "RNR Expired", +}; static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void) { struct mlx5_fpga_device *fdev = NULL; @@ -271,23 +276,38 @@ static const char *mlx5_fpga_syndrome_to_string(u8 syndrome) return "Unknown"; } +static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome) +{ + if (syndrome < ARRAY_SIZE(mlx5_fpga_qp_error_strings)) + return mlx5_fpga_qp_error_strings[syndrome]; + return "Unknown"; +} + void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data) { struct mlx5_fpga_device *fdev = mdev->fpga; const char *event_name; bool teardown = false; unsigned long flags; + u32 fpga_qpn; u8 syndrome; - if (event != MLX5_EVENT_TYPE_FPGA_ERROR) { + switch (event) { + case MLX5_EVENT_TYPE_FPGA_ERROR: + syndrome = MLX5_GET(fpga_error_event, data, syndrome); + event_name = mlx5_fpga_syndrome_to_string(syndrome); + break; + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: + syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome); + event_name = mlx5_fpga_qp_syndrome_to_string(syndrome); + fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn); + break; + default: mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n", event); return; } - syndrome = MLX5_GET(fpga_error_event, data, syndrome); - event_name = mlx5_fpga_syndrome_to_string(syndrome); - spin_lock_irqsave(&fdev->state_lock, flags); switch (fdev->state) { case MLX5_FPGA_STATUS_SUCCESS: -- cgit v1.2.3 From 5e7d77a9c5738457cdaa6bc230799c8f80733481 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 24 May 2018 13:44:24 +0300 Subject: net/mlx5e: TX, Obsolete maintaining local copies of skb->len/data Instead of maintaining a local 
copy of skb->len/data and updating it upon every copy to the WQE inline part, just calculate it once when needed, using the ihs. This obsoletes the function mlx5e_tx_skb_pull_inline. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 42 +++++++------------------ 1 file changed, 12 insertions(+), 30 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 725c06221e95..f29deb44bf3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -188,28 +188,16 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, return min_t(u16, hlen, skb_headlen(skb)); } -static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, - unsigned int *skb_len, - unsigned int len) -{ - *skb_len -= len; - *skb_data += len; -} - -static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs, - unsigned char **skb_data, - unsigned int *skb_len) +static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) { struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start; int cpy1_sz = 2 * ETH_ALEN; int cpy2_sz = ihs - cpy1_sz; - memcpy(vhdr, *skb_data, cpy1_sz); - mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz); + memcpy(vhdr, skb->data, cpy1_sz); vhdr->h_vlan_proto = skb->vlan_proto; vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); - memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz); - mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz); + memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); } static inline void @@ -357,8 +345,6 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe_info *wi; struct mlx5e_sq_stats *stats = sq->stats; - unsigned char *skb_data = skb->data; - unsigned int skb_len = skb->len; u16 ds_cnt, ds_cnt_inl = 0; u16 headlen, ihs, frag_pi; u8 num_wqebbs, opcode; @@ -385,7 +371,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, stats->bytes += num_bytes; stats->xmit_more += skb->xmit_more; - headlen = skb_len - ihs - skb->data_len; + headlen = skb->len - ihs - skb->data_len; ds_cnt += !!headlen; ds_cnt += skb_shinfo(skb)->nr_frags; @@ -414,15 +400,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, eseg->mss = mss; if (ihs) { + eseg->inline_hdr.sz = cpu_to_be16(ihs); if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr.start, skb, - ihs - VLAN_HLEN, &skb_data, &skb_len); + ihs -= VLAN_HLEN; + mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs); stats->added_vlan_packets++; } else { - memcpy(eseg->inline_hdr.start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + memcpy(eseg->inline_hdr.start, skb->data, ihs); } - eseg->inline_hdr.sz = cpu_to_be16(ihs); dseg += ds_cnt_inl; } else if (skb_vlan_tag_present(skb)) { eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); @@ -432,7 +417,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, stats->added_vlan_packets++; } - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg); + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; @@ -644,8 +629,6 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe_info *wi; struct mlx5e_sq_stats *stats = sq->stats; - unsigned char 
*skb_data = skb->data; - unsigned int skb_len = skb->len; u16 headlen, ihs, pi, frag_pi; u16 ds_cnt, ds_cnt_inl = 0; u8 num_wqebbs, opcode; @@ -672,7 +655,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, stats->bytes += num_bytes; stats->xmit_more += skb->xmit_more; - headlen = skb_len - ihs - skb->data_len; + headlen = skb->len - ihs - skb->data_len; ds_cnt += !!headlen; ds_cnt += skb_shinfo(skb)->nr_frags; @@ -704,13 +687,12 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, eseg->mss = mss; if (ihs) { - memcpy(eseg->inline_hdr.start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + memcpy(eseg->inline_hdr.start, skb->data, ihs); eseg->inline_hdr.sz = cpu_to_be16(ihs); dseg += ds_cnt_inl; } - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg); + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; -- cgit v1.2.3 From 75aa889fb9112874171dcb55b97302293dcd581e Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 2 Apr 2018 14:30:34 +0300 Subject: net/mlx5e: RX, Generalise name of non-linear SKB head size Make name more generic by dropping MPWRQ from it, as it will be used also in Legacy RQ in a downstream patch. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a23fa47fbbcc..9b4ed83783e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -112,7 +112,7 @@ struct page_pool; #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256) +#define MLX5E_RX_MAX_HEAD (256) #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_DEFAULT_LRO_TIMEOUT 32 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 5551e4e22ad0..61a76faa2779 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1070,7 +1070,7 @@ struct sk_buff * mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx) { - u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); + u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt); struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; u32 frag_offset = head_offset + headlen; u32 byte_cnt = cqe_bcnt - headlen; @@ -1078,7 +1078,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w struct sk_buff *skb; skb = napi_alloc_skb(rq->cq.napi, - ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long))); + ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long))); if (unlikely(!skb)) { rq->stats->buff_alloc_err++; return NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 027f54ac1ca2..4d316cc9b008 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -100,7 +100,7 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv) #ifdef CONFIG_INET /* loopback test */ -#define MLX5E_TEST_PKT_SIZE 
(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD - NET_IP_ALIGN) +#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN) static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST"; #define MLX5E_TEST_MAGIC 0x5AEED15C001ULL -- cgit v1.2.3 From fa698366b79a07b7cb0e16d3f4b9fd9d89acfd40 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 2 Apr 2018 14:30:34 +0300 Subject: net/mlx5e: RX, Generalise function of SKB frag addition Rename it and pass truesize as an extra argument, as it will be used also in Legacy RQ in a downstream patch. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 61a76faa2779..546e3bc46cd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -311,13 +311,11 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) mlx5e_free_rx_wqe(rq, wi); } -static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq, - struct sk_buff *skb, - struct mlx5e_dma_info *di, - u32 frag_offset, u32 len) +static inline void +mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, + struct mlx5e_dma_info *di, u32 frag_offset, u32 len, + unsigned int truesize) { - unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz)); - dma_sync_single_for_cpu(rq->pdev, di->addr + frag_offset, len, DMA_FROM_DEVICE); @@ -1094,9 +1092,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w while (byte_cnt) { u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); + unsigned int truesize = + ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); - mlx5e_add_skb_frag_mpwqe(rq, skb, di, frag_offset, - pg_consumed_bytes); + mlx5e_add_skb_frag(rq, skb, di, frag_offset, + pg_consumed_bytes, truesize); byte_cnt -= pg_consumed_bytes; frag_offset = 0; di++; -- cgit v1.2.3 From 386471f16b73ddc52d1f711d63c7dba5559c31f6 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 16 Apr 2018 17:11:07 +0300 Subject: net/mlx5e: RX, Dedicate a function for copying SKB header Get the logic of copying the packet header into the SKB linear part into a generic function. Function does copy length alignment and dma buffer sync. It is currently called only within the MPWQE flow. In a downstream patch, it will be called within the legacy RQ flow as well. 
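The caller side in the MPWQE path (reproduced from the diff below for readability) shows how the new helper is invoked twice when the header crosses a page boundary:

	u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);

	mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg);
	if (unlikely(offset + headlen > PAGE_SIZE)) {
		dma_info++;
		mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg,
				      headlen - headlen_pg);
	}
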
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 30 ++++++++++++++----------- 1 file changed, 17 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 546e3bc46cd1..634540afdcfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -324,6 +324,20 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, di->page, frag_offset, len, truesize); } +static inline void +mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, + struct mlx5e_dma_info *dma_info, + int offset_from, int offset_to, u32 headlen) +{ + const void *from = page_address(dma_info->page) + offset_from; + /* Aligning len to sizeof(long) optimizes memcpy performance */ + unsigned int len = ALIGN(headlen, sizeof(long)); + + dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data_offset(skb, offset_to, from, len); +} + static inline void mlx5e_copy_skb_header_mpwqe(struct device *pdev, struct sk_buff *skb, @@ -331,23 +345,13 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev, u32 offset, u32 headlen) { u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset); - unsigned int len; - /* Aligning len to sizeof(long) optimizes memcpy performance */ - len = ALIGN(headlen_pg, sizeof(long)); - dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len, - DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, page_address(dma_info->page) + offset, len); + mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg); if (unlikely(offset + headlen > PAGE_SIZE)) { dma_info++; - headlen_pg = len; - len = ALIGN(headlen - headlen_pg, sizeof(long)); - dma_sync_single_for_cpu(pdev, dma_info->addr, len, - DMA_FROM_DEVICE); - skb_copy_to_linear_data_offset(skb, headlen_pg, - page_address(dma_info->page), - len); + mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg, + headlen - headlen_pg); } } -- cgit v1.2.3 From 6c3a823e1e9c645f30c5b03fefe87fea8881060b Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 2 Apr 2018 16:28:10 +0300 Subject: net/mlx5e: RX, Remove HW LRO support in legacy RQ Current LRO implementation in Legacy RQ uses high-order pages. In downstream patches of this series we complete the transition to using only order-0 pages in RX datapath (which was already done in Striding RQ). Unlike the more advanced Striding RQ, Legacy RQ does not make reuse of any non-consumed buffers of non-full LRO sessions, and combining it with order-0 pages has many performance drawbacks. Hence, here we totally remove LRO support in Legacy RQ. This guarantees having no out-of-order completions, which allows using a cyclic work queue (instead of a linked-list) in a downstream patch. 
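The resulting mutual exclusion is enforced in two places in the diff below: set_pflag_rx_striding_rq() refuses to switch to legacy RQ while LRO is enabled, and mlx5e_fix_features() drops NETIF_F_LRO when striding RQ is off (excerpt reproduced for readability):

	if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		features &= ~NETIF_F_LRO;
		if (params->lro_en)
			netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
	}
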
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 7 +++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 33 +++++++++++++--------- 2 files changed, 26 insertions(+), 14 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 42bd256e680d..fffe514ba855 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1515,6 +1515,9 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return -EOPNOTSUPP; if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params)) return -EINVAL; + } else if (priv->channels.params.lro_en) { + netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n"); + return -EINVAL; } new_channels.params = priv->channels.params; @@ -1589,6 +1592,10 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) out: mutex_unlock(&priv->state_lock); + + /* Need to fix some features.. */ + netdev_update_features(netdev); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ab7b2a4e6edc..3f1f0552843c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -182,14 +182,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, params->log_rq_mtu_frames = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; - switch (params->rq_wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - /* Extra room needed for build_skb */ - params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - } mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, @@ -503,14 +495,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - byte_count = params->lro_en ? - params->lro_wqe_sz : - MLX5E_SW2HW_MTU(params, params->sw_mtu); + byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); #ifdef CONFIG_MLX5_EN_IPSEC if (MLX5_IPSEC_DEV(mdev)) byte_count += MLX5E_METADATA_ETHER_LEN; #endif - rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en; + rq->wqe.page_reuse = !params->xdp_prog; /* calc the required page order */ rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count); @@ -3311,6 +3301,12 @@ static int set_feature_lro(struct net_device *netdev, bool enable) mutex_lock(&priv->state_lock); old_params = &priv->channels.params; + if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) { + netdev_warn(netdev, "can't set LRO with legacy RQ\n"); + err = -EINVAL; + goto out; + } + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); new_channels.params = *old_params; @@ -3480,16 +3476,24 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_features_t features) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_params *params; mutex_lock(&priv->state_lock); + params = &priv->channels.params; if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) { /* HW strips the outer C-tag header, this is a problem * for S-tag traffic. 
*/ features &= ~NETIF_F_HW_VLAN_CTAG_RX; - if (!priv->channels.params.vlan_strip_disable) + if (!params->vlan_strip_disable) netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); } + if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) { + features &= ~NETIF_F_LRO; + if (params->lro_en) + netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); + } + mutex_unlock(&priv->state_lock); return features; @@ -4328,7 +4332,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; - if (!!MLX5_CAP_ETH(mdev, lro_cap)) + if (!!MLX5_CAP_ETH(mdev, lro_cap) && + mlx5e_check_fragmented_striding_rq_cap(mdev)) netdev->vlan_features |= NETIF_F_LRO; netdev->hw_features = netdev->vlan_features; -- cgit v1.2.3 From 422d4c401edd186722d7530ffdaf11a6bb542cab Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 2 Apr 2018 17:23:14 +0300 Subject: net/mlx5e: RX, Split WQ objects for different RQ types Replace the common RQ WQ object with two separate ones for the different RQ types. This is in preparation for switching to using a cyclic WQ type in Legacy RQ. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 128 +++++++++++++++------- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 35 +++--- 3 files changed, 110 insertions(+), 57 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 9b4ed83783e4..f2f2dcf6b23c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -498,10 +498,9 @@ enum mlx5e_rq_flag { struct mlx5e_rq { /* data path */ - struct mlx5_wq_ll wq; - union { struct { + struct mlx5_wq_ll wq; struct mlx5e_wqe_frag_info *frag_info; u32 frag_sz; /* max possible skb frag_sz */ union { @@ -509,6 +508,7 @@ struct mlx5e_rq { }; } wqe; struct { + struct mlx5_wq_ll wq; struct mlx5e_umr_wqe umr_wqe; struct mlx5e_mpw_info *info; mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3f1f0552843c..3a007717cba5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -319,10 +319,30 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } +static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return mlx5_wq_ll_get_size(&rq->mpwqe.wq); + default: + return mlx5_wq_ll_get_size(&rq->wqe.wq); + } +} + +static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return rq->mpwqe.wq.cur_sz; + default: + return rq->wqe.wq.cur_sz; + } +} + static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, struct mlx5e_channel *c) { - int wq_sz = mlx5_wq_ll_get_size(&rq->wq); + int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), GFP_KERNEL, cpu_to_node(c->cpu)); @@ -370,7 +390,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { - u64 num_mtts = 
MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq)); + u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq)); return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey); } @@ -397,15 +417,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rqp->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq, - &rq->wq_ctrl); - if (err) - return err; - - rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; - - wq_sz = mlx5_wq_ll_get_size(&rq->wq); - rq->wq_type = params->rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@ -434,8 +445,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params); + rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; @@ -472,6 +492,15 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_destroy_umr_mkey; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ + err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_ll_get_size(&rq->wqe.wq); + rq->wqe.frag_info = kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), GFP_KERNEL, cpu_to_node(c->cpu)); @@ -538,16 +567,21 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; for (i = 0; i < wq_sz; i++) { - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + struct mlx5e_rx_wqe *wqe = + mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom); - } + wqe->data.byte_count = cpu_to_be32(byte_count); + wqe->data.lkey = rq->mkey_be; + } else { + struct mlx5e_rx_wqe *wqe = + mlx5_wq_ll_get_wqe(&rq->wqe.wq, i); - wqe->data.byte_count = cpu_to_be32(byte_count); - wqe->data.lkey = rq->mkey_be; + wqe->data.byte_count = cpu_to_be32(byte_count); + wqe->data.lkey = rq->mkey_be; + } } INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work); @@ -744,51 +778,65 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time); struct mlx5e_channel *c = rq->channel; - struct mlx5_wq_ll *wq = &rq->wq; - u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq)); + u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq)); do { - if (wq->cur_sz >= min_wqes) + if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes) return 0; msleep(20); } while (time_before(jiffies, exp_time)); netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", - c->ix, rq->rqn, wq->cur_sz, min_wqes); + c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); return -ETIMEDOUT; } static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; - struct mlx5e_rx_wqe *wqe; __be16 wqe_ix_be; u16 wqe_ix; - /* UMR WQE (if in progress) is always at wq->head */ - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && - rq->mpwqe.umr_in_progress) - mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); - - while (!mlx5_wq_ll_is_empty(wq)) { - wqe_ix_be = *wq->tail_next; - wqe_ix = be16_to_cpu(wqe_ix_be); - wqe 
= mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); - rq->dealloc_wqe(rq, wqe_ix); - mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, - &wqe->next.next_wqe_index); - } + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; + + if (rq->mpwqe.umr_in_progress) + mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); + + while (!mlx5_wq_ll_is_empty(wq)) { + struct mlx5e_rx_wqe *wqe; + + wqe_ix_be = *wq->tail_next; + wqe_ix = be16_to_cpu(wqe_ix_be); + wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix); + rq->dealloc_wqe(rq, wqe_ix); + mlx5_wq_ll_pop(wq, wqe_ix_be, + &wqe->next.next_wqe_index); + } + } else { + struct mlx5_wq_ll *wq = &rq->wqe.wq; + + while (!mlx5_wq_ll_is_empty(wq)) { + struct mlx5e_rx_wqe *wqe; + + wqe_ix_be = *wq->tail_next; + wqe_ix = be16_to_cpu(wqe_ix_be); + wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix); + rq->dealloc_wqe(rq, wqe_ix); + mlx5_wq_ll_pop(wq, wqe_ix_be, + &wqe->next.next_wqe_index); + } - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) { /* Clean outstanding pages on handled WQEs that decided to do page-reuse, * but yet to be re-posted. */ - int wq_sz = mlx5_wq_ll_get_size(&rq->wq); + if (rq->wqe.page_reuse) { + int wq_sz = mlx5_wq_ll_get_size(wq); - for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++) - rq->dealloc_wqe(rq, wqe_ix); + for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++) + rq->dealloc_wqe(rq, wqe_ix); + } } } @@ -2809,7 +2857,7 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, param->wq.db_numa_node = param->wq.buf_numa_node; - err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, + err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 634540afdcfc..3b12d4de5b98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -113,7 +113,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, mpwrq_get_cqe_consumed_strides(&cq->title); else cq->decmprs_wqe_counter = - mlx5_wq_ll_ctr2ix(&rq->wq, cq->decmprs_wqe_counter + 1); + mlx5_wq_ll_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1); } static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, @@ -369,7 +369,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); rq->mpwqe.umr_in_progress = false; @@ -470,7 +470,7 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5_wq_ll *wq = &rq->wqe.wq; int err; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) @@ -546,7 +546,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; @@ -987,6 +987,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { + struct mlx5_wq_ll *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; struct mlx5e_rx_wqe *wqe; __be16 wqe_counter_be; @@ -996,7 +997,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wqe_counter_be = cqe->wqe_counter; 
wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter); wi = &rq->wqe.frag_info[wqe_counter]; cqe_bcnt = be32_to_cpu(cqe->byte_cnt); @@ -1018,7 +1019,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5e_free_rx_wqe_reuse(rq, wi); wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, + mlx5_wq_ll_pop(wq, wqe_counter_be, &wqe->next.next_wqe_index); } @@ -1029,6 +1030,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_wq_ll *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; struct mlx5e_rx_wqe *wqe; struct sk_buff *skb; @@ -1038,7 +1040,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wqe_counter_be = cqe->wqe_counter; wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter); wi = &rq->wqe.frag_info[wqe_counter]; cqe_bcnt = be32_to_cpu(cqe->byte_cnt); @@ -1063,7 +1065,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5e_free_rx_wqe_reuse(rq, wi); wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, + mlx5_wq_ll_pop(wq, wqe_counter_be, &wqe->next.next_wqe_index); } #endif @@ -1164,6 +1166,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) u32 head_offset = wqe_offset & (PAGE_SIZE - 1); u32 page_idx = wqe_offset >> PAGE_SHIFT; struct mlx5e_rx_wqe *wqe; + struct mlx5_wq_ll *wq; struct sk_buff *skb; u16 cqe_bcnt; @@ -1193,9 +1196,10 @@ mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return; - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); + wq = &rq->mpwqe.wq; + wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); mlx5e_free_rx_mpwqe(rq, wi); - mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index); + mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); } int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) @@ -1399,6 +1403,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { + struct mlx5_wq_ll *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; struct mlx5e_rx_wqe *wqe; __be16 wqe_counter_be; @@ -1408,7 +1413,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wqe_counter_be = cqe->wqe_counter; wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter); wi = &rq->wqe.frag_info[wqe_counter]; cqe_bcnt = be32_to_cpu(cqe->byte_cnt); @@ -1425,7 +1430,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wq_free_wqe: mlx5e_free_rx_wqe_reuse(rq, wi); - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, + mlx5_wq_ll_pop(wq, wqe_counter_be, &wqe->next.next_wqe_index); } @@ -1435,6 +1440,7 @@ wq_free_wqe: void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { + struct mlx5_wq_ll *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; struct mlx5e_rx_wqe *wqe; __be16 wqe_counter_be; @@ -1444,7 +1450,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wqe_counter_be = cqe->wqe_counter; wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter); wi = &rq->wqe.frag_info[wqe_counter]; cqe_bcnt = be32_to_cpu(cqe->byte_cnt); 
@@ -1465,8 +1471,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5e_free_rx_wqe_reuse(rq, wi); wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); + mlx5_wq_ll_pop(wq, wqe_counter_be, &wqe->next.next_wqe_index); } #endif /* CONFIG_MLX5_EN_IPSEC */ -- cgit v1.2.3 From 99cbfa93a6122b1e9011d3f4e94b58e10d2f5cd0 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 2 Apr 2018 17:31:31 +0300 Subject: net/mlx5e: RX, Use cyclic WQ in legacy RQ Now that LRO is not supported for Legacy RQ, there is no source of out-of-order completions in the WQ, and we can use a cyclic one. This has multiple advantages: - reduces the WQE size (smaller PCI transactions). - lower overhead in datapath (no handling of 'next' pointers). - no reserved WQE for the WQ head (was need in linked-list). - allows using a constant map between frag and dma_info struct, in downstream patch. Performance tests: ConnectX-4, single core, single RX ring. Major gain in packet rate of single ring XDP drop. Bottleneck is shifted form HW (at 16Mpps) to SW (at 20Mpps). Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 89 ++++++++++------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 115 ++++++++++------------ drivers/net/ethernet/mellanox/mlx5/core/wq.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/wq.h | 55 ++++++++++- 6 files changed, 161 insertions(+), 111 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f2f2dcf6b23c..af521dd52993 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -186,9 +186,13 @@ struct mlx5e_tx_wqe { struct mlx5_wqe_data_seg data[0]; }; -struct mlx5e_rx_wqe { +struct mlx5e_rx_wqe_ll { struct mlx5_wqe_srq_next_seg next; - struct mlx5_wqe_data_seg data; + struct mlx5_wqe_data_seg data[0]; +}; + +struct mlx5e_rx_wqe_cyc { + struct mlx5_wqe_data_seg data[0]; }; struct mlx5e_umr_wqe { @@ -500,7 +504,7 @@ struct mlx5e_rq { /* data path */ union { struct { - struct mlx5_wq_ll wq; + struct mlx5_wq_cyc wq; struct mlx5e_wqe_frag_info *frag_info; u32 frag_sz; /* max possible skb frag_sz */ union { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3a007717cba5..7fd2d736fbb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -166,7 +166,7 @@ static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, linear_rq_headroom += NET_IP_ALIGN; - if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST) + if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) return linear_rq_headroom; if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) @@ -205,7 +205,7 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) && MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ? 
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : - MLX5_WQ_TYPE_LINKED_LIST; + MLX5_WQ_TYPE_CYCLIC; } static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@ -325,7 +325,7 @@ static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: return mlx5_wq_ll_get_size(&rq->mpwqe.wq); default: - return mlx5_wq_ll_get_size(&rq->wqe.wq); + return mlx5_wq_cyc_get_size(&rq->wqe.wq); } } @@ -491,15 +491,15 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, if (err) goto err_destroy_umr_mkey; break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, - &rq->wq_ctrl); + default: /* MLX5_WQ_TYPE_CYCLIC */ + err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, + &rq->wq_ctrl); if (err) return err; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; - wq_sz = mlx5_wq_ll_get_size(&rq->wqe.wq); + wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); rq->wqe.frag_info = kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), @@ -568,19 +568,19 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, for (i = 0; i < wq_sz; i++) { if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { - struct mlx5e_rx_wqe *wqe = + struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); - wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom); - wqe->data.byte_count = cpu_to_be32(byte_count); - wqe->data.lkey = rq->mkey_be; + wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); + wqe->data[0].byte_count = cpu_to_be32(byte_count); + wqe->data[0].lkey = rq->mkey_be; } else { - struct mlx5e_rx_wqe *wqe = - mlx5_wq_ll_get_wqe(&rq->wqe.wq, i); + struct mlx5e_rx_wqe_cyc *wqe = + mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i); - wqe->data.byte_count = cpu_to_be32(byte_count); - wqe->data.lkey = rq->mkey_be; + wqe->data[0].byte_count = cpu_to_be32(byte_count); + wqe->data[0].lkey = rq->mkey_be; } } @@ -630,7 +630,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) kfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ + default: /* MLX5_WQ_TYPE_CYCLIC */ kfree(rq->wqe.frag_info); } @@ -801,11 +801,12 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { struct mlx5_wq_ll *wq = &rq->mpwqe.wq; + /* UMR WQE (if in progress) is always at wq->head */ if (rq->mpwqe.umr_in_progress) mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); while (!mlx5_wq_ll_is_empty(wq)) { - struct mlx5e_rx_wqe *wqe; + struct mlx5e_rx_wqe_ll *wqe; wqe_ix_be = *wq->tail_next; wqe_ix = be16_to_cpu(wqe_ix_be); @@ -815,24 +816,19 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) &wqe->next.next_wqe_index); } } else { - struct mlx5_wq_ll *wq = &rq->wqe.wq; - - while (!mlx5_wq_ll_is_empty(wq)) { - struct mlx5e_rx_wqe *wqe; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; - wqe_ix_be = *wq->tail_next; - wqe_ix = be16_to_cpu(wqe_ix_be); - wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix); + while (!mlx5_wq_cyc_is_empty(wq)) { + wqe_ix = mlx5_wq_cyc_get_tail(wq); rq->dealloc_wqe(rq, wqe_ix); - mlx5_wq_ll_pop(wq, wqe_ix_be, - &wqe->next.next_wqe_index); + mlx5_wq_cyc_pop(wq); } /* Clean outstanding pages on handled WQEs that decided to do page-reuse, * but yet to be re-posted. 
*/ if (rq->wqe.page_reuse) { - int wq_sz = mlx5_wq_ll_get_size(wq); + int wq_sz = mlx5_wq_cyc_get_size(wq); for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++) rq->dealloc_wqe(rq, wqe_ix); @@ -1958,6 +1954,21 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) kfree(c); } +static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs) +{ + int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs; + + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + sz += sizeof(struct mlx5e_rx_wqe_ll); + break; + default: /* MLX5_WQ_TYPE_CYCLIC */ + sz += sizeof(struct mlx5e_rx_wqe_cyc); + } + + return order_base_2(sz); +} + static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_rq_param *param) @@ -1965,6 +1976,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + int ndsegs = 1; switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -1974,16 +1986,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wqe_stride_size, mlx5e_mpwqe_get_log_stride_size(mdev, params) - MLX5_MPWQE_LOG_STRIDE_SZ_BASE); - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params)); break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + default: /* MLX5_WQ_TYPE_CYCLIC */ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); } + MLX5_SET(wq, wq, wq_type, params->rq_wq_type); MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); - MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(wq, wq, log_wq_stride, + mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs)); MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); @@ -1999,8 +2011,9 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); - MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, log_wq_stride, + mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1)); MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter); param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); @@ -2051,7 +2064,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) + mlx5e_mpwqe_get_log_num_strides(mdev, params); break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ + default: /* MLX5_WQ_TYPE_CYCLIC */ log_cq_size = params->log_rq_mtu_frames; } @@ -2857,8 +2870,8 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, param->wq.db_numa_node = param->wq.buf_numa_node; - err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wqe.wq, - &rq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, rqc_wq, &rq->wqe.wq, + &rq->wq_ctrl); if (err) return err; @@ -3360,7 +3373,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable) new_channels.params = *old_params; new_channels.params.lro_en = enable; - if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) == mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params)) reset = false; @@ -3566,7 +3579,7 @@ 
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; - if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 8ab4c96b7f7c..3857f22b5500 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -927,7 +927,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->hard_mtu = MLX5E_ETH_HARD_MTU; params->sw_mtu = mtu; params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; - params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; + params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3b12d4de5b98..3cdf2c097356 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -113,7 +113,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, mpwrq_get_cqe_consumed_strides(&cq->title); else cq->decmprs_wqe_counter = - mlx5_wq_ll_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1); + mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1); } static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, @@ -270,7 +270,12 @@ static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq, !mlx5e_page_is_reserved(wi->di.page); } -static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) +static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) +{ + return &rq->wqe.frag_info[ix]; +} + +static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, u16 ix) { struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix]; @@ -281,7 +286,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 wi->offset = 0; } - wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom); + wqe->data[0].addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom); return 0; } @@ -370,7 +375,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) { struct mlx5_wq_ll *wq = &rq->mpwqe.wq; - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); rq->mpwqe.umr_in_progress = false; @@ -470,31 +475,32 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wqe.wq; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; int err; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; - if (mlx5_wq_ll_is_full(wq)) + if (mlx5_wq_cyc_is_full(wq)) return false; do { - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + u16 head = mlx5_wq_cyc_get_head(wq); + struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, head); - err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head); + err = mlx5e_alloc_rx_wqe(rq, wqe, head); if (unlikely(err)) { rq->stats->buff_alloc_err++; break; } - mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); - } while (!mlx5_wq_ll_is_full(wq)); + 
mlx5_wq_cyc_push(wq); + } while (!mlx5_wq_cyc_is_full(wq)); /* ensure wqes are visible to device before updating doorbell record */ dma_wmb(); - mlx5_wq_ll_update_db_record(wq); + mlx5_wq_cyc_update_db_record(wq); return !!err; } @@ -987,19 +993,15 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { - struct mlx5_wq_ll *wq = &rq->wqe.wq; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_counter_be; struct sk_buff *skb; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { @@ -1007,20 +1009,19 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { wi->di.page = NULL; /* do not return page to cache, it will be returned on XDP_TX completion */ - goto wq_ll_pop; + goto wq_cyc_pop; } /* probably an XDP_DROP, save the page-reuse checks */ mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto wq_cyc_pop; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); mlx5e_free_rx_wqe_reuse(rq, wi); -wq_ll_pop: - mlx5_wq_ll_pop(wq, wqe_counter_be, - &wqe->next.next_wqe_index); +wq_cyc_pop: + mlx5_wq_cyc_pop(wq); } #ifdef CONFIG_MLX5_ESWITCH @@ -1030,30 +1031,26 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - struct mlx5_wq_ll *wq = &rq->wqe.wq; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; struct sk_buff *skb; - __be16 wqe_counter_be; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { wi->di.page = NULL; /* do not return page to cache, it will be returned on XDP_TX completion */ - goto wq_ll_pop; + goto wq_cyc_pop; } /* probably an XDP_DROP, save the page-reuse checks */ mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto wq_cyc_pop; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); @@ -1064,9 +1061,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); mlx5e_free_rx_wqe_reuse(rq, wi); -wq_ll_pop: - mlx5_wq_ll_pop(wq, wqe_counter_be, - &wqe->next.next_wqe_index); +wq_cyc_pop: + mlx5_wq_cyc_pop(wq); } #endif @@ -1165,7 +1161,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; u32 head_offset = wqe_offset & (PAGE_SIZE - 1); u32 page_idx = wqe_offset >> PAGE_SHIFT; - struct mlx5e_rx_wqe *wqe; + struct mlx5e_rx_wqe_ll *wqe; struct mlx5_wq_ll *wq; struct sk_buff *skb; u16 cqe_bcnt; 
@@ -1403,19 +1399,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { - struct mlx5_wq_ll *wq = &rq->wqe.wq; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_counter_be; struct sk_buff *skb; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) @@ -1430,8 +1422,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wq_free_wqe: mlx5e_free_rx_wqe_reuse(rq, wi); - mlx5_wq_ll_pop(wq, wqe_counter_be, - &wqe->next.next_wqe_index); + mlx5_wq_cyc_pop(wq); } #endif /* CONFIG_MLX5_CORE_IPOIB */ @@ -1440,38 +1431,34 @@ wq_free_wqe: void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { - struct mlx5_wq_ll *wq = &rq->wqe.wq; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_counter_be; struct sk_buff *skb; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (unlikely(!skb)) { /* a DROP, save the page-reuse checks */ mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto wq_cyc_pop; } skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb); if (unlikely(!skb)) { mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto wq_cyc_pop; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); mlx5e_free_rx_wqe_reuse(rq, wi); -wq_ll_pop: - mlx5_wq_ll_pop(wq, wqe_counter_be, &wqe->next.next_wqe_index); +wq_cyc_pop: + mlx5_wq_cyc_pop(wq); } #endif /* CONFIG_MLX5_EN_IPSEC */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 5b8b35392025..b97bb72b4db4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -85,6 +85,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride), MLX5_GET(wq, wqc, log_wq_sz), fbc); + wq->sz = wq->fbc.sz_m1 + 1; err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index b9d7c01fc7cb..0b47126815b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -51,6 +51,9 @@ struct mlx5_wq_ctrl { struct mlx5_wq_cyc { struct mlx5_frag_buf_ctrl fbc; __be32 *db; + u16 sz; + u16 wqe_ctr; + u16 cur_sz; }; struct mlx5_wq_qp { @@ -95,6 +98,43 @@ u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); +static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq) +{ + return wq->cur_sz == wq->sz; +} + +static inline int mlx5_wq_cyc_missing(struct mlx5_wq_cyc *wq) +{ + return wq->sz - 
wq->cur_sz; +} + +static inline int mlx5_wq_cyc_is_empty(struct mlx5_wq_cyc *wq) +{ + return !wq->cur_sz; +} + +static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq) +{ + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n) +{ + wq->wqe_ctr += n; + wq->cur_sz += n; +} + +static inline void mlx5_wq_cyc_pop(struct mlx5_wq_cyc *wq) +{ + wq->cur_sz--; +} + +static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq) +{ + *wq->db = cpu_to_be32(wq->wqe_ctr); +} + static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) { return ctr & wq->fbc.sz_m1; @@ -105,6 +145,16 @@ static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr) return ctr & wq->fbc.frag_sz_m1; } +static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq) +{ + return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr); +} + +static inline u16 mlx5_wq_cyc_get_tail(struct mlx5_wq_cyc *wq) +{ + return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz); +} + static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) { return mlx5_frag_buf_get_wqe(&wq->fbc, ix); @@ -179,11 +229,6 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) return !wq->cur_sz; } -static inline u16 mlx5_wq_ll_ctr2ix(struct mlx5_wq_ll *wq, u16 ctr) -{ - return ctr & wq->fbc.sz_m1; -} - static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) { return mlx5_frag_buf_get_wqe(&wq->fbc, ix); -- cgit v1.2.3 From 069d11465a802a2b58ce530312b7bb6e1e0b61a9 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 2 May 2018 18:23:58 +0300 Subject: net/mlx5e: RX, Enhance legacy Receive Queue memory scheme Enhance the memory scheme of the legacy RQ, such that only order-0 pages are used. Whenever possible, prefer using a linear SKB, and build it wrapping the WQE buffer. Otherwise (for example, jumbo frames on x86), use a non-linear SKB, with as many frags as needed. In this case, multiple WQE scatter entries are used, up to a maximum of 4 frags and 10KB of MTU. This required removing HW LRO support in legacy RQ, as it would need a large number of page allocations and scatter entries per WQE on archs with PAGE_SIZE = 4KB, yielding bad performance. In earlier patches, we guaranteed that all completions are in-order, and that we use a cyclic WQ. This creates an opportunity for a performance optimization: the mapping between a "struct mlx5e_dma_info", and the WQEs (struct mlx5e_wqe_frag_info) pointing to it, is constant across different cycles of a WQ. This allows initializing the mapping at RQ creation time, rather than handling it in the datapath. A struct mlx5e_dma_info that is shared between different WQEs is allocated by the first WQE, and freed by the last one. This implies an important requirement: WQEs that share the same struct mlx5e_dma_info must be posted within the same NAPI cycle. Otherwise, upon completion, struct mlx5e_wqe_frag_info would mistakenly point to the new struct mlx5e_dma_info, not the one that was posted (and the HW wrote to). This bulking requirement is also good for performance, hence we extend the bulk beyond the minimal requirement above. With this memory scheme, the RQ's memory footprint is reduced by a factor of 2 on x86, and by a factor of 32 on PowerPC. The same factors apply to the number of pages in a GRO session. Performance tests: ConnectX-4, single core, single RX ring, default MTU.
x86: CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz Packet rate (early drop in TC): no degradation TCP streams: ~5% improvement PowerPC: CPU: POWER8 (raw), altivec supported Packet rate (early drop in TC): 20% gain TCP streams: 25% gain Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 44 +++- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 238 ++++++++++++++++------ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 207 +++++++++++++------ 3 files changed, 362 insertions(+), 127 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index af521dd52993..eb9eb7aa953a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -101,11 +101,15 @@ struct page_pool; (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \ (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU)) +#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM)) +#define MLX5E_LOG_MAX_RX_WQE_BULK \ + (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ))) + #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1 +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK) #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \ MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW) @@ -462,8 +466,9 @@ struct mlx5e_dma_info { }; struct mlx5e_wqe_frag_info { - struct mlx5e_dma_info di; + struct mlx5e_dma_info *di; u32 offset; + bool last_in_page; }; struct mlx5e_umr_dma_info { @@ -476,6 +481,8 @@ struct mlx5e_mpw_info { DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); }; +#define MLX5E_MAX_RX_FRAGS 4 + /* a single cache unit is capable to serve one napi call (for non-striding rq) * or a MPWQE (for striding rq). 
*/ @@ -493,6 +500,9 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); typedef struct sk_buff * (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); +typedef struct sk_buff * +(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); @@ -500,16 +510,27 @@ enum mlx5e_rq_flag { MLX5E_RQ_FLAG_XDP_XMIT = BIT(0), }; +struct mlx5e_rq_frag_info { + int frag_size; + int frag_stride; +}; + +struct mlx5e_rq_frags_info { + struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS]; + u8 num_frags; + u8 log_num_frags; + u8 wqe_bulk; +}; + struct mlx5e_rq { /* data path */ union { struct { - struct mlx5_wq_cyc wq; - struct mlx5e_wqe_frag_info *frag_info; - u32 frag_sz; /* max possible skb frag_sz */ - union { - bool page_reuse; - }; + struct mlx5_wq_cyc wq; + struct mlx5e_wqe_frag_info *frags; + struct mlx5e_dma_info *di; + struct mlx5e_rq_frags_info info; + mlx5e_fp_skb_from_cqe skb_from_cqe; } wqe; struct { struct mlx5_wq_ll wq; @@ -523,7 +544,6 @@ struct mlx5e_rq { }; struct { u16 headroom; - u8 page_order; u8 map_dir; /* dma map direction */ } buff; @@ -879,6 +899,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct sk_buff * mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); +struct sk_buff * +mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); +struct sk_buff * +mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); void mlx5e_update_stats(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7fd2d736fbb1..2c634e50d051 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -51,6 +51,7 @@ struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; struct mlx5_wq_param wq; + struct mlx5e_rq_frags_info frags_info; }; struct mlx5e_sq_param { @@ -93,7 +94,7 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) return true; } -static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) +static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params) { if (!params->xdp_prog) { u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); @@ -107,19 +108,27 @@ static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) { - u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params); return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); } +static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); + + return !params->lro_en && frag_sz <= PAGE_SIZE; +} + static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); s8 signed_log_num_strides_param; u8 log_num_strides; - if (params->lro_en || frag_sz > PAGE_SIZE) + if 
(!mlx5e_rx_is_linear_skb(mdev, params)) return false; if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) @@ -145,7 +154,7 @@ static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) - return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params)); + return order_base_2(mlx5e_rx_get_linear_frag_sz(params)); return MLX5E_MPWQE_STRIDE_SZ(mdev, MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); @@ -163,16 +172,15 @@ static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, { u16 linear_rq_headroom = params->xdp_prog ? XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + bool is_linear_skb; linear_rq_headroom += NET_IP_ALIGN; - if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) - return linear_rq_headroom; - - if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) - return linear_rq_headroom; + is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ? + mlx5e_rx_is_linear_skb(mdev, params) : + mlx5e_rx_mpwqe_is_linear_skb(mdev, params); - return 0; + return is_linear_skb ? linear_rq_headroom : 0; } void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, @@ -400,6 +408,61 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; } +static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) +{ + struct mlx5e_wqe_frag_info next_frag, *prev; + int i; + + next_frag.di = &rq->wqe.di[0]; + next_frag.offset = 0; + prev = NULL; + + for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { + struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct mlx5e_wqe_frag_info *frag = + &rq->wqe.frags[i << rq->wqe.info.log_num_frags]; + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) { + if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) { + next_frag.di++; + next_frag.offset = 0; + if (prev) + prev->last_in_page = true; + } + *frag = next_frag; + + /* prepare next */ + next_frag.offset += frag_info[f].frag_stride; + prev = frag; + } + } + + if (prev) + prev->last_in_page = true; +} + +static int mlx5e_init_di_list(struct mlx5e_rq *rq, + struct mlx5e_params *params, + int wq_sz, int cpu) +{ + int len = wq_sz << rq->wqe.info.log_num_frags; + + rq->wqe.di = kvzalloc_node(len * sizeof(*rq->wqe.di), + GFP_KERNEL, cpu_to_node(cpu)); + if (!rq->wqe.di) + return -ENOMEM; + + mlx5e_init_frags_partition(rq); + + return 0; +} + +static void mlx5e_free_di_list(struct mlx5e_rq *rq) +{ + kvfree(rq->wqe.di); +} + static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_rq_param *rqp, @@ -409,8 +472,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = c->mdev; void *rqc = rqp->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); - u32 byte_count, pool_size; - int npages; + u32 pool_size; int wq_sz; int err; int i; @@ -480,8 +542,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params); rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params)); - byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; - err = mlx5e_create_rq_umr_mkey(mdev, rq); if (err) goto err_rq_wq_destroy; @@ -489,7 +549,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5e_rq_alloc_mpwqe_info(rq, c); if (err) - goto err_destroy_umr_mkey; + goto err_free; break; default: /* MLX5_WQ_TYPE_CYCLIC */ err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, @@ -501,13 +561,17 @@ static int 
mlx5e_alloc_rq(struct mlx5e_channel *c, wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); - rq->wqe.frag_info = - kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), - GFP_KERNEL, cpu_to_node(c->cpu)); - if (!rq->wqe.frag_info) { - err = -ENOMEM; - goto err_rq_wq_destroy; - } + rq->wqe.info = rqp->frags_info; + rq->wqe.frags = + kvzalloc_node((wq_sz << rq->wqe.info.log_num_frags) * + sizeof(*rq->wqe.frags), + GFP_KERNEL, cpu_to_node(c->cpu)); + if (!rq->wqe.frags) + goto err_free; + + err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu); + if (err) + goto err_free; rq->post_wqes = mlx5e_post_rx_wqes; rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; @@ -518,30 +582,19 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, #endif rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe; if (!rq->handle_rx_cqe) { - kfree(rq->wqe.frag_info); err = -EINVAL; netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err); - goto err_rq_wq_destroy; + goto err_free; } - byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); -#ifdef CONFIG_MLX5_EN_IPSEC - if (MLX5_IPSEC_DEV(mdev)) - byte_count += MLX5E_METADATA_ETHER_LEN; -#endif - rq->wqe.page_reuse = !params->xdp_prog; - - /* calc the required page order */ - rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count); - npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE); - rq->buff.page_order = order_base_2(npages); - - byte_count |= MLX5_HW_START_PADDING; + rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ? + mlx5e_skb_from_cqe_linear : + mlx5e_skb_from_cqe_nonlinear; rq->mkey_be = c->mkey_be; } /* Create a page_pool and register it with rxq */ - pp_params.order = rq->buff.page_order; + pp_params.order = 0; pp_params.flags = 0; /* No-internal DMA mapping in page_pool */ pp_params.pool_size = pool_size; pp_params.nid = cpu_to_node(c->cpu); @@ -555,21 +608,21 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, */ rq->page_pool = page_pool_create(&pp_params); if (IS_ERR(rq->page_pool)) { - if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - kfree(rq->wqe.frag_info); err = PTR_ERR(rq->page_pool); rq->page_pool = NULL; - goto err_rq_wq_destroy; + goto err_free; } err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, rq->page_pool); if (err) - goto err_rq_wq_destroy; + goto err_free; for (i = 0; i < wq_sz; i++) { if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); + u32 byte_count = + rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); @@ -578,9 +631,21 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, } else { struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i); + int f; - wqe->data[0].byte_count = cpu_to_be32(byte_count); - wqe->data[0].lkey = rq->mkey_be; + for (f = 0; f < rq->wqe.info.num_frags; f++) { + u32 frag_size = rq->wqe.info.arr[f].frag_size | + MLX5_HW_START_PADDING; + + wqe->data[f].byte_count = cpu_to_be32(frag_size); + wqe->data[f].lkey = rq->mkey_be; + } + /* check if num_frags is not a pow of two */ + if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) { + wqe->data[f].byte_count = 0; + wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY); + wqe->data[f].addr = 0; + } } } @@ -600,8 +665,16 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, return 0; -err_destroy_umr_mkey: - mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); +err_free: + switch (rq->wq_type) { + case 
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + kfree(rq->mpwqe.info); + mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); + break; + default: /* MLX5_WQ_TYPE_CYCLIC */ + kvfree(rq->wqe.frags); + mlx5e_free_di_list(rq); + } err_rq_wq_destroy: if (rq->xdp_prog) @@ -631,7 +704,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_CYCLIC */ - kfree(rq->wqe.frag_info); + kvfree(rq->wqe.frags); + mlx5e_free_di_list(rq); } for (i = rq->page_cache.head; i != rq->page_cache.tail; @@ -823,17 +897,8 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) rq->dealloc_wqe(rq, wqe_ix); mlx5_wq_cyc_pop(wq); } - - /* Clean outstanding pages on handled WQEs that decided to do page-reuse, - * but yet to be re-posted. - */ - if (rq->wqe.page_reuse) { - int wq_sz = mlx5_wq_cyc_get_size(wq); - - for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++) - rq->dealloc_wqe(rq, wqe_ix); - } } + } static int mlx5e_open_rq(struct mlx5e_channel *c, @@ -1954,6 +2019,61 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) kfree(c); } +#define DEFAULT_FRAG_SIZE (2048) + +static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_frags_info *info) +{ + u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); + int frag_size_max = DEFAULT_FRAG_SIZE; + u32 buf_size = 0; + int i; + +#ifdef CONFIG_MLX5_EN_IPSEC + if (MLX5_IPSEC_DEV(mdev)) + byte_count += MLX5E_METADATA_ETHER_LEN; +#endif + + if (mlx5e_rx_is_linear_skb(mdev, params)) { + int frag_stride; + + frag_stride = mlx5e_rx_get_linear_frag_sz(params); + frag_stride = roundup_pow_of_two(frag_stride); + + info->arr[0].frag_size = byte_count; + info->arr[0].frag_stride = frag_stride; + info->num_frags = 1; + info->wqe_bulk = PAGE_SIZE / frag_stride; + goto out; + } + + if (byte_count > PAGE_SIZE + + (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max) + frag_size_max = PAGE_SIZE; + + i = 0; + while (buf_size < byte_count) { + int frag_size = byte_count - buf_size; + + if (i < MLX5E_MAX_RX_FRAGS - 1) + frag_size = min(frag_size, frag_size_max); + + info->arr[i].frag_size = frag_size; + info->arr[i].frag_stride = roundup_pow_of_two(frag_size); + + buf_size += frag_size; + i++; + } + info->num_frags = i; + /* number of different wqes sharing a page */ + info->wqe_bulk = 1 + (info->num_frags % 2); + +out: + info->wqe_bulk = max_t(u8, info->wqe_bulk, 8); + info->log_num_frags = order_base_2(info->num_frags); +} + static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs) { int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs; @@ -1990,6 +2110,8 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, break; default: /* MLX5_WQ_TYPE_CYCLIC */ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); + mlx5e_build_rq_frags_info(mdev, params, ¶m->frags_info); + ndsegs = param->frags_info.num_frags; } MLX5_SET(wq, wq, wq_type, params->rq_wq_type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3cdf2c097356..d3a1dd20e41d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -164,8 +164,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; } -#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT) - static inline bool mlx5e_page_is_reserved(struct page *page) { return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); @@ -214,7 +212,7 @@ 
static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, stats->cache_reuse++; dma_sync_single_for_device(rq->pdev, dma_info->addr, - RQ_PAGE_SIZE(rq), + PAGE_SIZE, DMA_FROM_DEVICE); return true; } @@ -230,7 +228,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, return -ENOMEM; dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0, - RQ_PAGE_SIZE(rq), rq->buff.map_dir); + PAGE_SIZE, rq->buff.map_dir); if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { put_page(dma_info->page); dma_info->page = NULL; @@ -243,8 +241,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { - dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq), - rq->buff.map_dir); + dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); } void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, @@ -262,58 +259,96 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, } } -static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *wi) +static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, + struct mlx5e_wqe_frag_info *frag) +{ + int err = 0; + + if (!frag->offset) + /* On first frag (offset == 0), replenish page (dma_info actually). + * Other frags that point to the same dma_info (with a different + * offset) should just use the new one without replenishing again + * by themselves. + */ + err = mlx5e_page_alloc_mapped(rq, frag->di); + + return err; +} + +static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, + struct mlx5e_wqe_frag_info *frag) { - return rq->wqe.page_reuse && wi->di.page && - (wi->offset + rq->wqe.frag_sz <= RQ_PAGE_SIZE(rq)) && - !mlx5e_page_is_reserved(wi->di.page); + if (frag->last_in_page) + mlx5e_page_release(rq, frag->di, true); } static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) { - return &rq->wqe.frag_info[ix]; + return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; } -static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, u16 ix) +static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, + u16 ix) { - struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix]; + struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix); + int err; + int i; - /* check if page exists, hence can be reused */ - if (!wi->di.page) { - if (unlikely(mlx5e_page_alloc_mapped(rq, &wi->di))) - return -ENOMEM; - wi->offset = 0; + for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { + err = mlx5e_get_rx_frag(rq, frag); + if (unlikely(err)) + goto free_frags; + + wqe->data[i].addr = cpu_to_be64(frag->di->addr + + frag->offset + rq->buff.headroom); } - wqe->data[0].addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom); return 0; + +free_frags: + while (--i >= 0) + mlx5e_put_rx_frag(rq, --frag); + + return err; } static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi) { - mlx5e_page_release(rq, &wi->di, true); - wi->di.page = NULL; + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) + mlx5e_put_rx_frag(rq, wi); } -static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *wi) +void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) { - if (mlx5e_page_reuse(rq, wi)) { - rq->stats->page_reuse++; - return; - } + struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); mlx5e_free_rx_wqe(rq, wi); } -void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 
ix) +static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) { - struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix]; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + int err; + int i; - if (wi->di.page) - mlx5e_free_rx_wqe(rq, wi); + for (i = 0; i < wqe_bulk; i++) { + struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i); + + err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i); + if (unlikely(err)) + goto free_wqes; + } + + return 0; + +free_wqes: + while (--i >= 0) + mlx5e_dealloc_rx_wqe(rq, ix + i); + + return err; } static inline void @@ -476,26 +511,28 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; + u8 wqe_bulk; int err; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; - if (mlx5_wq_cyc_is_full(wq)) + wqe_bulk = rq->wqe.info.wqe_bulk; + + if (mlx5_wq_cyc_missing(wq) < wqe_bulk) return false; do { u16 head = mlx5_wq_cyc_get_head(wq); - struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, head); - err = mlx5e_alloc_rx_wqe(rq, wqe, head); + err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk); if (unlikely(err)) { rq->stats->buff_alloc_err++; break; } - mlx5_wq_cyc_push(wq); - } while (!mlx5_wq_cyc_is_full(wq)); + mlx5_wq_cyc_push_n(wq, wqe_bulk); + } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk); /* ensure wqes are visible to device before updating doorbell record */ dma_wmb(); @@ -949,11 +986,11 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, return skb; } -static inline -struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +struct sk_buff * +mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { - struct mlx5e_dma_info *di = &wi->di; + struct mlx5e_dma_info *di = wi->di; u16 rx_headroom = rq->buff.headroom; struct sk_buff *skb; void *va, *data; @@ -968,7 +1005,6 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, frag_size, DMA_FROM_DEVICE); prefetchw(va); /* xdp_frame data area */ prefetch(data); - wi->offset += frag_size; if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { rq->stats->wqe_err++; @@ -991,6 +1027,56 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, return skb; } +struct sk_buff * +mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +{ + struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct mlx5e_wqe_frag_info *head_wi = wi; + u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt); + u16 frag_headlen = headlen; + u16 byte_cnt = cqe_bcnt - headlen; + struct sk_buff *skb; + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats->wqe_err++; + return NULL; + } + + /* XDP is not supported in this configuration, as incoming packets + * might spread among multiple pages. 
+ */ + skb = napi_alloc_skb(rq->cq.napi, + ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long))); + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + prefetchw(skb->data); + + while (byte_cnt) { + u16 frag_consumed_bytes = + min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt); + + mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen, + frag_consumed_bytes, frag_info->frag_stride); + byte_cnt -= frag_consumed_bytes; + frag_headlen = 0; + frag_info++; + wi++; + } + + /* copy header */ + mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, + 0, headlen); + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; + + return skb; +} + void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; @@ -1003,23 +1089,23 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi = get_frag(rq, ci); cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { - wi->di.page = NULL; - /* do not return page to cache, it will be returned on XDP_TX completion */ + /* do not return page to cache, + * it will be returned on XDP_TX completion. + */ goto wq_cyc_pop; } - /* probably an XDP_DROP, save the page-reuse checks */ - mlx5e_free_rx_wqe(rq, wi); - goto wq_cyc_pop; + goto free_wqe; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe_reuse(rq, wi); +free_wqe: + mlx5e_free_rx_wqe(rq, wi); wq_cyc_pop: mlx5_wq_cyc_pop(wq); } @@ -1041,16 +1127,16 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi = get_frag(rq, ci); cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { + /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { - wi->di.page = NULL; - /* do not return page to cache, it will be returned on XDP_TX completion */ + /* do not return page to cache, + * it will be returned on XDP_TX completion. 
+ */ goto wq_cyc_pop; } - /* probably an XDP_DROP, save the page-reuse checks */ - mlx5e_free_rx_wqe(rq, wi); - goto wq_cyc_pop; + goto free_wqe; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); @@ -1060,7 +1146,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe_reuse(rq, wi); +free_wqe: + mlx5e_free_rx_wqe(rq, wi); wq_cyc_pop: mlx5_wq_cyc_pop(wq); } @@ -1409,7 +1496,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi = get_frag(rq, ci); cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) goto wq_free_wqe; @@ -1421,7 +1508,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); wq_free_wqe: - mlx5e_free_rx_wqe_reuse(rq, wi); + mlx5e_free_rx_wqe(rq, wi); mlx5_wq_cyc_pop(wq); } @@ -1441,7 +1528,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi = get_frag(rq, ci); cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (unlikely(!skb)) { /* a DROP, save the page-reuse checks */ mlx5e_free_rx_wqe(rq, wi); @@ -1456,7 +1543,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe_reuse(rq, wi); + mlx5e_free_rx_wqe(rq, wi); wq_cyc_pop: mlx5_wq_cyc_pop(wq); } -- cgit v1.2.3 From 5ffd81943d7a57423f204cd5844bf430b5634472 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 20 Feb 2018 15:17:54 +0200 Subject: net/mlx5e: RX, Always prefer Linear SKB configuration Prefer the linear SKB configuration of Legacy RQ over the non-linear one of Striding RQ. This implies that ConnectX-4 LX now uses legacy RQ by default, as it does not support the linear configuration of Striding RQ. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2c634e50d051..333d4ed52b94 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4405,9 +4405,16 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); /* RQ */ - if (mlx5e_striding_rq_possible(mdev, params)) - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, - !slow_pci_heuristic(mdev)); + /* Prefer Striding RQ, unless any of the following holds: + * - Striding RQ configuration is not possible/supported. + * - Slow PCI heuristic. + * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. 
+ */ + if (!slow_pci_heuristic(mdev) && + mlx5e_striding_rq_possible(mdev, params) && + (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) || + !mlx5e_rx_is_linear_skb(mdev, params))) + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); mlx5e_set_rq_type(mdev, params); mlx5e_init_rq_type_params(mdev, params); -- cgit v1.2.3 From f65a59ffbcc26135e225058b2e6cd49ab9f9f13f Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 18 Apr 2018 13:29:11 +0300 Subject: net/mlx5e: TX, Separate cachelines of xmit and completion stats Avoid false sharing of cachelines by separating the cachelines of TX stats that are dirtied in the xmit flow and in the completion flow. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 8 ++++---- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 9 +++++---- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 697dc7397ba2..1646859974ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -64,11 +64,11 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, @@ -1137,11 +1137,11 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, }; static const struct counter_desc ch_stats_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 390c7afa5188..643153bb3607 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -75,11 +75,11 @@ struct mlx5e_sw_stats { u64 tx_csum_partial; u64 tx_csum_partial_inner; u64 tx_queue_stopped; - u64 tx_queue_wake; u64 tx_queue_dropped; u64 tx_xmit_more; - u64 tx_cqe_err; u64 tx_recover; + u64 tx_queue_wake; + u64 tx_cqe_err; u64 rx_wqe_err; u64 rx_mpwqe_filler; u64 rx_buff_alloc_err; @@ -203,10 +203,11 @@ struct mlx5e_sq_stats { /* less likely accessed in data path */ u64 csum_none; u64 stopped; - u64 wake; u64 dropped; - u64 cqe_err; u64
recover; + /* dirtied @completion */ + u64 wake ____cacheline_aligned_in_smp; + u64 cqe_err; }; struct mlx5e_ch_stats { -- cgit v1.2.3 From eaf47b17a77fda841a1102d76c15161ee438b347 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 30 May 2018 22:13:20 +0200 Subject: net: phy: consider PHY_IGNORE_INTERRUPT in state machine PHY_NOLINK handling We can bail out immediately also in case of PHY_IGNORE_INTERRUPT because phy_mac_interupt() informs us once the link is up. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 05c1e8ef15e6..537297d2b4b4 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -894,7 +894,7 @@ void phy_state_machine(struct work_struct *work) needs_aneg = true; break; case PHY_NOLINK: - if (phy_interrupt_is_valid(phydev)) + if (phydev->irq != PHY_POLL) break; err = phy_read_status(phydev); -- cgit v1.2.3 From 9c6ffbacdb5740a8560729f18e8e0e33ad21473b Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 31 May 2018 02:04:43 +0000 Subject: hv_netvsc: fix error return code in netvsc_probe() Fix to return a negative error code from the failover register fail error handling case instead of 0, as done elsewhere in this function. Fixes: 1ff78076d8dd ("netvsc: refactor notifier/event handling code to use the failover framework") Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ebe964203eff..bef4d55a108c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2031,8 +2031,10 @@ static int netvsc_probe(struct hv_device *dev, } net_device_ctx->failover = failover_register(net, &netvsc_failover_ops); - if (IS_ERR(net_device_ctx->failover)) + if (IS_ERR(net_device_ctx->failover)) { + ret = PTR_ERR(net_device_ctx->failover); goto err_failover; + } return ret; -- cgit v1.2.3 From 8cb77149e8c8c8f6fef67ac2749868c563a449c6 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 31 May 2018 02:31:12 +0000 Subject: net/mlx5: Make function mlx5_fpga_tls_send_teardown_cmd() static Fixes the following sparse warning: drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c:199:6: warning: symbol 'mlx5_fpga_tls_send_teardown_cmd' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index 21048013826c..c9736238604a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -196,8 +196,8 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) MLX5_GET(tls_flow, flow, direction_sx)); } -void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow, - u32 swid, gfp_t flags) +static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, + void *flow, u32 swid, gfp_t flags) { struct mlx5_teardown_stream_context *ctx; struct mlx5_fpga_dma_buf *buf; -- cgit v1.2.3 From a6ee84be2cb9d1d16efbe833c369122e676be09f Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 31 May 2018 11:48:48 +0800 Subject: net: netcp: ethss: remove unnecessary pointer set to NULL If statement has make sure the 'slave->phy' is NULL Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 6a728d35e776..6e455a27a8de 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3206,7 +3206,6 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, if (!slave->phy) { dev_err(dev, "phy not found for slave %d\n", slave->slave_num); - slave->phy = NULL; } else { dev_dbg(dev, "phy found: id is: 0x%s\n", phydev_name(slave->phy)); -- cgit v1.2.3 From deba328ce9b04085c6a214f7dbf5aa8f4f9041a1 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 31 May 2018 19:51:15 +0800 Subject: net: axienet: remove stale comment of axienet_open axienet_open no longer return -ENODEV when PHY cannot be connected to since commit d7cc3163e026 ("net: axienet: Support phy-less mode of operation") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index e74e1e897864..f24f48f33802 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -900,7 +900,6 @@ static void axienet_dma_err_handler(unsigned long data); * @ndev: Pointer to net_device structure * * Return: 0, on success. - * -ENODEV, if PHY cannot be connected to * non-zero error value on failure * * This is the driver open routine. It calls phy_start to start the PHY device. -- cgit v1.2.3 From 885892fb378dc096693557ba4f2b875188619b36 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 31 May 2018 05:52:24 -0700 Subject: mlx4_core: restore optimal ICM memory allocation Commit 1383cb8103bb ("mlx4_core: allocate ICM memory in page size chunks") brought two regressions caught in our regression suite. The big one is an additional cost of 256 bytes of overhead per 4096 bytes, or 6.25 % which is unacceptable since ICM can be pretty large. 
This comes from having to allocate one struct mlx4_icm_chunk (256 bytes) per MLX4_TABLE_CHUNK, which the buggy commit shrank to 4KB (instead of prior 256KB) Note that mlx4_alloc_icm() is already able to try high order allocations and fallback to low-order allocations under high memory pressure. Most of these allocations happen right after boot time, when we get plenty of non fragmented memory, there is really no point being so pessimistic and break huge pages into order-0 ones just for fun. We only have to tweak gfp_mask a bit, to help falling back faster, without risking OOM killings. Second regression is an KASAN fault, that will need further investigations. Fixes: 1383cb8103bb ("mlx4_core: allocate ICM memory in page size chunks") Signed-off-by: Eric Dumazet Acked-by: Tariq Toukan Cc: John Sperbeck Cc: Tarick Bedeir Cc: Qing Huang Cc: Daniel Jurgens Cc: Zhu Yanjun Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/icm.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 685337d58276..5342bd8a3d0b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -43,12 +43,13 @@ #include "fw.h" /* - * We allocate in page size (default 4KB on many archs) chunks to avoid high - * order memory allocations in fragmented/high usage memory situation. + * We allocate in as big chunks as we can, up to a maximum of 256 KB + * per chunk. Note that the chunks are not necessarily in contiguous + * physical memory. */ enum { - MLX4_ICM_ALLOC_SIZE = PAGE_SIZE, - MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE, + MLX4_ICM_ALLOC_SIZE = 1 << 18, + MLX4_TABLE_CHUNK_SIZE = 1 << 18, }; static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) @@ -135,6 +136,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, struct mlx4_icm *icm; struct mlx4_icm_chunk *chunk = NULL; int cur_order; + gfp_t mask; int ret; /* We use sg_set_buf for coherent allocs, which assumes low memory */ @@ -178,13 +180,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, while (1 << cur_order > npages) --cur_order; + mask = gfp_mask; + if (cur_order) + mask &= ~__GFP_DIRECT_RECLAIM; + if (coherent) ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, &chunk->mem[chunk->npages], - cur_order, gfp_mask); + cur_order, mask); else ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], - cur_order, gfp_mask, + cur_order, mask, dev->numa_node); if (ret) { -- cgit v1.2.3 From 42b33468987bac0dd95c30f14820c7abac04a153 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 31 May 2018 10:59:47 +0200 Subject: xdp: add flags argument to ndo_xdp_xmit API This patch only change the API and reject any use of flags. This is an intermediate step that allows us to implement the flush flag operation later, for each individual driver in a separate patch. The plan is to implement flush operation via XDP_XMIT_FLUSH flag and then remove XDP_XMIT_FLAGS_NONE when done. 
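For orientation, a minimal sketch of the shape each implementation takes in this intermediate step; the foo_xdp_xmit() driver below is hypothetical and only illustrates the new argument and the reject-all-flags check (the XDP_XMIT_* definitions are the ones this patch adds to include/net/xdp.h):

static int foo_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	/* No flag bits are understood yet, so anything other than
	 * XDP_XMIT_FLAGS_NONE (0) is rejected up front.
	 */
	if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE))
		return -EINVAL;

	/* ...queue up to n frames here; frames that cannot be sent are
	 * freed with xdp_return_frame() and counted as drops...
	 */
	return n;
}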
Signed-off-by: Jesper Dangaard Brouer Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6 +++++- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 3 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5 ++++- drivers/net/tun.c | 8 ++++++-- drivers/net/virtio_net.c | 5 ++++- include/linux/netdevice.h | 7 ++++--- include/net/xdp.h | 5 +++++ kernel/bpf/devmap.c | 2 +- net/core/filter.c | 2 +- 9 files changed, 32 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 9b698c5acd05..c0451d6e0790 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3670,7 +3670,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * For error cases, a negative errno code is returned and no-frames * are transmitted (caller must handle freeing frames). **/ -int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames) +int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); @@ -3684,6 +3685,9 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames) if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) return -ENXIO; + if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE)) + return -EINVAL; + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index eb8804b3d7b6..820f76db251b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -487,7 +487,8 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); void i40e_detect_recover_hung(struct i40e_vsi *vsi); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); -int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames); +int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags); void i40e_xdp_flush(struct net_device *dev); /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 031d65c4178d..87f088f4af52 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10023,7 +10023,7 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static int ixgbe_xdp_xmit(struct net_device *dev, int n, - struct xdp_frame **frames) + struct xdp_frame **frames, u32 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; @@ -10033,6 +10033,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) return -ENETDOWN; + if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE)) + return -EINVAL; + /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. 
*/ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2265d2ccea47..b182b8cdd219 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1285,7 +1285,8 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, }; -static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames) +static int tun_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; @@ -1294,6 +1295,9 @@ static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames int cnt = n; int i; + if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE)) + return -EINVAL; + rcu_read_lock(); numqueues = READ_ONCE(tun->numqueues); @@ -1332,7 +1336,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) if (unlikely(!frame)) return -EOVERFLOW; - return tun_xdp_xmit(dev, 1, &frame); + return tun_xdp_xmit(dev, 1, &frame, 0); } static void tun_xdp_flush(struct net_device *dev) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b2647dd5d302..4ed823625953 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -468,7 +468,7 @@ static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi, } static int virtnet_xdp_xmit(struct net_device *dev, - int n, struct xdp_frame **frames) + int n, struct xdp_frame **frames, u32 flags) { struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; @@ -481,6 +481,9 @@ static int virtnet_xdp_xmit(struct net_device *dev, int err; int i; + if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE)) + return -EINVAL; + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); sq = &vi->sq[qp]; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8452f72087ef..7f17785a59d7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1185,13 +1185,13 @@ struct dev_ifalias { * This function is used to set or query state related to XDP on the * netdevice and manage BPF offload. See definition of * enum bpf_netdev_command for details. - * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp); + * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, + * u32 flags); * This function is used to submit @n XDP packets for transmit on a * netdevice. Returns number of frames successfully transmitted, frames * that got dropped are freed/returned via xdp_return_frame(). * Returns negative number, means general error invoking ndo, meaning * no frames were xmit'ed and core-caller will free all frames. - * TODO: Consider add flag to allow sending flush operation. * void (*ndo_xdp_flush)(struct net_device *dev); * This function is used to inform the driver to flush a particular * xdp tx queue. Must be called on same CPU as xdp_xmit. 
@@ -1380,7 +1380,8 @@ struct net_device_ops { int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); int (*ndo_xdp_xmit)(struct net_device *dev, int n, - struct xdp_frame **xdp); + struct xdp_frame **xdp, + u32 flags); void (*ndo_xdp_flush)(struct net_device *dev); }; diff --git a/include/net/xdp.h b/include/net/xdp.h index 7ad779237ae8..0c45f0f943ed 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -40,6 +40,11 @@ enum xdp_mem_type { MEM_TYPE_MAX, }; +/* XDP flags for ndo_xdp_xmit */ +#define XDP_XMIT_FLAGS_NONE 0U +#define XDP_XMIT_FLUSH (1U << 0) /* doorbell signal consumer */ +#define XDP_XMIT_FLAGS_MASK XDP_XMIT_FLUSH + struct xdp_mem_info { u32 type; /* enum xdp_mem_type, but known size type */ u32 id; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 1fe3fe60508a..037e234056f7 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -232,7 +232,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj, prefetch(xdpf); } - sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q); + sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, 0); if (sent < 0) { err = sent; sent = 0; diff --git a/net/core/filter.c b/net/core/filter.c index 28e864777c0f..56e40dafdde7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3056,7 +3056,7 @@ static int __bpf_tx_xdp(struct net_device *dev, if (unlikely(!xdpf)) return -EOVERFLOW; - sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf); + sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, 0); if (sent <= 0) return sent; dev->netdev_ops->ndo_xdp_flush(dev); -- cgit v1.2.3 From cdb57ed07fafb79a250e62d714b8910f2d341ef2 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 31 May 2018 10:59:52 +0200 Subject: i40e: implement flush flag for ndo_xdp_xmit When passed the XDP_XMIT_FLUSH flag i40e_xdp_xmit now performs the same kind of ring tail update as in i40e_xdp_flush. 
The advantage is that all the necessary checks have been performed and xdp_ring can be updated, instead of having to perform the exact same steps/checks in i40e_xdp_flush Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index c0451d6e0790..5f01e4ce9c92 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3676,6 +3676,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); struct i40e_vsi *vsi = np->vsi; + struct i40e_ring *xdp_ring; int drops = 0; int i; @@ -3685,20 +3686,25 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) return -ENXIO; - if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE)) + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; + xdp_ring = vsi->xdp_rings[queue_index]; + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; - err = i40e_xmit_xdp_ring(xdpf, vsi->xdp_rings[queue_index]); + err = i40e_xmit_xdp_ring(xdpf, xdp_ring); if (err != I40E_XDP_TX) { xdp_return_frame_rx_napi(xdpf); drops++; } } + if (unlikely(flags & XDP_XMIT_FLUSH)) + i40e_xdp_ring_update_tail(xdp_ring); + return n - drops; } -- cgit v1.2.3 From 5e2e609522d67750d4b3c0321d1917bb109ac41b Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 31 May 2018 10:59:57 +0200 Subject: ixgbe: implement flush flag for ndo_xdp_xmit When passed the XDP_XMIT_FLUSH flag ixgbe_xdp_xmit now performs the same kind of ring tail update as in ixgbe_xdp_flush. The update tail code in ixgbe_xdp_flush is generalized and shared with ixgbe_xdp_xmit. Signed-off-by: Jesper Dangaard Brouer Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 87f088f4af52..4fd77c9067f2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10022,6 +10022,15 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) +{ + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. 
+ */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + static int ixgbe_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { @@ -10033,7 +10042,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) return -ENETDOWN; - if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE)) + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; /* During program transitions its possible adapter->xdp_prog is assigned @@ -10054,6 +10063,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, } } + if (unlikely(flags & XDP_XMIT_FLUSH)) + ixgbe_xdp_ring_update_tail(ring); + return n - drops; } @@ -10072,11 +10084,7 @@ static void ixgbe_xdp_flush(struct net_device *dev) if (unlikely(!ring)) return; - /* Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. - */ - wmb(); - writel(ring->next_to_use, ring->tail); + ixgbe_xdp_ring_update_tail(ring); return; } -- cgit v1.2.3 From 0c9d917b7d74b758af1b4e37996af48d95b2ef33 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 31 May 2018 11:00:03 +0200 Subject: tun: implement flush flag for ndo_xdp_xmit When passed the XDP_XMIT_FLUSH flag tun_xdp_xmit now performs the same kind of socket wake up as in tun_xdp_flush(). The wake up code from tun_xdp_flush is generalized and shared with tun_xdp_xmit. Signed-off-by: Jesper Dangaard Brouer Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- drivers/net/tun.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b182b8cdd219..d82a05fb0594 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1285,6 +1285,14 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, }; +static void __tun_xdp_flush_tfile(struct tun_file *tfile) +{ + /* Notify and wake up reader process */ + if (tfile->flags & TUN_FASYNC) + kill_fasync(&tfile->fasync, SIGIO, POLL_IN); + tfile->socket.sk->sk_data_ready(tfile->socket.sk); +} + static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { @@ -1295,7 +1303,7 @@ static int tun_xdp_xmit(struct net_device *dev, int n, int cnt = n; int i; - if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE)) + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; rcu_read_lock(); @@ -1325,6 +1333,9 @@ static int tun_xdp_xmit(struct net_device *dev, int n, } spin_unlock(&tfile->tx_ring.producer_lock); + if (flags & XDP_XMIT_FLUSH) + __tun_xdp_flush_tfile(tfile); + rcu_read_unlock(); return cnt - drops; } @@ -1353,11 +1364,7 @@ static void tun_xdp_flush(struct net_device *dev) tfile = rcu_dereference(tun->tfiles[smp_processor_id() % numqueues]); - /* Notify and wake up reader process */ - if (tfile->flags & TUN_FASYNC) - kill_fasync(&tfile->fasync, SIGIO, POLL_IN); - tfile->socket.sk->sk_data_ready(tfile->socket.sk); - + __tun_xdp_flush_tfile(tfile); out: rcu_read_unlock(); } -- cgit v1.2.3 From 5d274cb4bbae481161737a1f652e276883d3d970 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 31 May 2018 11:00:08 +0200 Subject: virtio_net: implement flush flag for ndo_xdp_xmit When passed the XDP_XMIT_FLUSH flag virtnet_xdp_xmit now performs the same virtqueue_kick as virtnet_xdp_flush. 
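Taken together with the i40e, ixgbe and tun patches above, the drivers converge on one pattern; the sketch below condenses it with hypothetical helpers foo_queue_frame() and foo_kick() standing in for the driver-specific enqueue and doorbell/kick steps:

static int foo_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	int i, drops = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		if (foo_queue_frame(dev, frames[i])) {	/* hypothetical */
			xdp_return_frame_rx_napi(frames[i]);
			drops++;
		}
	}

	/* Flush once per batch instead of once per frame, but only when
	 * the caller asks for it.
	 */
	if (flags & XDP_XMIT_FLUSH)
		foo_kick(dev);				/* hypothetical */

	return n - drops;
}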
Signed-off-by: Jesper Dangaard Brouer Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- drivers/net/virtio_net.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4ed823625953..62ba8aadd8e6 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -481,7 +481,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, int err; int i; - if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE)) + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); @@ -507,6 +507,10 @@ static int virtnet_xdp_xmit(struct net_device *dev, drops++; } } + + if (flags & XDP_XMIT_FLUSH) + virtqueue_kick(sq->vq); + return n - drops; } -- cgit v1.2.3 From 2fa3c8a8b23041490b279d2630f1cfc9fae242fb Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Thu, 31 May 2018 07:16:32 -0700 Subject: net: virtio: simplify the virtnet_find_vqs Use the common free functions while return successfully. Signed-off-by: Tonghao Zhang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 2d55e2af7cb6..6d710b8b41c5 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2609,12 +2609,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi) vi->sq[i].vq = vqs[txq2vq(i)]; } - kfree(names); - kfree(callbacks); - kfree(vqs); - kfree(ctx); + /* run here: ret == 0. */ - return 0; err_find: kfree(ctx); -- cgit v1.2.3 From 5e9f20359a166de934b5bd02cb54c0be0e8c2890 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 31 May 2018 18:47:36 -0700 Subject: qed: Fix shared memory inconsistency between driver and the MFW. The structure shared between driver and management firmware (MFW) differ in sizes. The additional field defined by the MFW is not relevant to the current driver. Add a dummy field to the structure. Fixes: cac6f691 ("qed: Add support for Unified Fabric Port") Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 8e1e6e1eb40e..beba9308011b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11996,6 +11996,7 @@ struct public_port { #define EEE_REMOTE_TW_RX_MASK 0xffff0000 #define EEE_REMOTE_TW_RX_OFFSET 16 + u32 reserved1; u32 oem_cfg_port; #define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 #define OEM_CFG_CHANNEL_TYPE_OFFSET 0 -- cgit v1.2.3 From b5fabb080062e7685b898e9c0ec4d95f4d526ed2 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 31 May 2018 18:47:37 -0700 Subject: qed: Fix use of incorrect shmem address. Incorrect shared memory address is used while deriving the values for tc and pri_type. Use shmem address corresponding to 'oem_cfg_func' where the management firmare saves tc/pri_type values. Fixes: cac6f691 ("qed: Add support for Unified Fabric Port") Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 2612e3e458d9..6f9927d1a501 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1514,9 +1514,10 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); - val = (port_cfg & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET; + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >> + OEM_CFG_FUNC_TC_OFFSET; p_hwfn->ufp_info.tc = (u8)val; - val = (port_cfg & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; -- cgit v1.2.3 From 9a8a02c9d46dcd4c663dac39e6518b6bb7ac1631 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Thu, 31 May 2018 18:01:27 +0100 Subject: net: stmmac: Add Flexible PPS support This adds support for Flexible PPS output (which is equivalent to per_out output of PTP subsystem). Tested using an oscilloscope and the following commands: 1) Start PTP4L: # ptp4l -A -4 -H -m -i eth0 & 2) Set Flexible PPS frequency: # echo > /sys/class/ptp/ptpX/period Where, ts/tns is start time and ps/pns is period time, and ptpX is ptp of eth0. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Cc: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 2 + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1 + drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 1 + drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 2 + drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 55 +++++++++++++++++++++++ drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 22 +++++++++ drivers/net/ethernet/stmicro/stmmac/hwif.h | 7 +++ drivers/net/ethernet/stmicro/stmmac/stmmac.h | 12 +++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ++ drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 42 +++++++++++++++-- 10 files changed, 145 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index a679cb729d1d..78fd0f8b8e81 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -346,6 +346,8 @@ struct dma_features { /* TX and RX number of queues */ unsigned int number_rx_queues; unsigned int number_tx_queues; + /* PPS output */ + unsigned int pps_out_num; /* Alternate (enhanced) DESC mode */ unsigned int enh_desc; /* TX and RX FIFO sizes */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 6330a55953df..eb013d54025a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -187,6 +187,7 @@ enum power_event { #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) /* MAC HW features2 bitmap */ +#define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24) #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) #define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 
a7121a7d9391..7e5d5db0d516 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -796,6 +796,7 @@ const struct stmmac_ops dwmac510_ops = { .safety_feat_irq_status = dwmac5_safety_feat_irq_status, .safety_feat_dump = dwmac5_safety_feat_dump, .rxp_config = dwmac5_rxp_config, + .flex_pps_config = dwmac5_flex_pps_config, }; int dwmac4_setup(struct stmmac_priv *priv) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index bf8e5a16f11c..d37f17ca62fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -373,6 +373,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; dma_cap->number_tx_queues = ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; + /* PPS output */ + dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24; /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index b2becb80a697..3f4f3132e16b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -8,6 +8,7 @@ #include "dwmac4.h" #include "dwmac5.h" #include "stmmac.h" +#include "stmmac_ptp.h" struct dwmac5_error_desc { bool valid; @@ -494,3 +495,57 @@ re_enable: writel(old_val, ioaddr + GMAC_CONFIG); return ret; } + +int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags) +{ + u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + u32 val = readl(ioaddr + MAC_PPS_CONTROL); + u64 period; + + if (!cfg->available) + return -EINVAL; + if (tnsec & TRGTBUSY0) + return -EBUSY; + if (!sub_second_inc || !systime_flags) + return -EINVAL; + + val &= ~PPSx_MASK(index); + + if (!enable) { + val |= PPSCMDx(index, 0x5); + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; + } + + val |= PPSCMDx(index, 0x2); + val |= TRGTMODSELx(index, 0x2); + val |= PPSEN0; + + writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); + + if (!(systime_flags & PTP_TCR_TSCTRLSSR)) + cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; + writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + + period = cfg->period.tv_sec * 1000000000; + period += cfg->period.tv_nsec; + + do_div(period, sub_second_inc); + + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index)); + + period >>= 1; + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index)); + + /* Finally, activate it */ + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index cc810aff7100..775db776b3cc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -11,6 +11,25 @@ #define PRTYEN BIT(1) #define TMOUTEN BIT(0) +#define MAC_PPS_CONTROL 0x00000b70 +#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1) +#define PPS_MINIDX(x) ((x) * 8) +#define PPSx_MASK(x) GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x)) +#define MCGRENx(x) BIT(PPS_MAXIDX(x)) +#define TRGTMODSELx(x, val) \ + GENMASK(PPS_MAXIDX(x) - 1, PPS_MAXIDX(x) - 2) & \ + ((val) << (PPS_MAXIDX(x) - 2)) +#define PPSCMDx(x, val) \ + GENMASK(PPS_MINIDX(x) + 3, PPS_MINIDX(x)) & \ + ((val) << PPS_MINIDX(x)) +#define PPSEN0 BIT(4) 
+#define MAC_PPSx_TARGET_TIME_SEC(x) (0x00000b80 + ((x) * 0x10)) +#define MAC_PPSx_TARGET_TIME_NSEC(x) (0x00000b84 + ((x) * 0x10)) +#define TRGTBUSY0 BIT(31) +#define TTSL0 GENMASK(30, 0) +#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10)) +#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10)) + #define MTL_RXP_CONTROL_STATUS 0x00000ca0 #define RXPI BIT(31) #define NPE GENMASK(23, 16) @@ -61,5 +80,8 @@ int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, int index, unsigned long *count, const char **desc); int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, unsigned int count); +int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); #endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index f499a7fad6f0..e44e7b26ce82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -241,6 +241,7 @@ struct net_device; struct rgmii_adv; struct stmmac_safety_stats; struct stmmac_tc_entry; +struct stmmac_pps_cfg; /* Helpers to program the MAC core */ struct stmmac_ops { @@ -313,6 +314,10 @@ struct stmmac_ops { /* Flexible RX Parser */ int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries, unsigned int count); + /* Flexible PPS */ + int (*flex_pps_config)(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); }; #define stmmac_core_init(__priv, __args...) \ @@ -379,6 +384,8 @@ struct stmmac_ops { stmmac_do_callback(__priv, mac, safety_feat_dump, __args) #define stmmac_rxp_config(__priv, __args...) \ stmmac_do_callback(__priv, mac, rxp_config, __args) +#define stmmac_flex_pps_config(__priv, __args...) 
\ + stmmac_do_callback(__priv, mac, flex_pps_config, __args) /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index fbfe5dcefa87..025efbf6145c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -100,6 +100,13 @@ struct stmmac_tc_entry { } __packed val; }; +#define STMMAC_PPS_MAX 4 +struct stmmac_pps_cfg { + bool available; + struct timespec64 start; + struct timespec64 period; +}; + struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ u32 tx_count_frames; @@ -160,6 +167,8 @@ struct stmmac_priv { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; + u32 sub_second_inc; + u32 systime_flags; u32 adv_ts; int use_riwt; int irq_wake; @@ -181,6 +190,9 @@ struct stmmac_priv { unsigned int tc_entries_max; unsigned int tc_off_max; struct stmmac_tc_entry *tc_entries; + + /* Pulse Per Second output */ + struct stmmac_pps_cfg pps[STMMAC_PPS_MAX]; }; enum stmmac_state { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 77af85c981db..11fb7c777d89 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -721,6 +721,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) priv->plat->has_gmac4, &sec_inc); temp = div_u64(1000000000ULL, sec_inc); + /* Store sub second increment and flags for later use */ + priv->sub_second_inc = sec_inc; + priv->systime_flags = value; + /* calculate default added value: * formula is : * addend = (2^32)/freq_div_ratio; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 7d3a5c7f5db6..0cb0e39a2be9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -140,17 +140,43 @@ static int stmmac_set_time(struct ptp_clock_info *ptp, static int stmmac_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { - return -EOPNOTSUPP; + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + struct stmmac_pps_cfg *cfg; + int ret = -EOPNOTSUPP; + unsigned long flags; + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + cfg = &priv->pps[rq->perout.index]; + + cfg->start.tv_sec = rq->perout.start.sec; + cfg->start.tv_nsec = rq->perout.start.nsec; + cfg->period.tv_sec = rq->perout.period.sec; + cfg->period.tv_nsec = rq->perout.period.nsec; + + spin_lock_irqsave(&priv->ptp_lock, flags); + ret = stmmac_flex_pps_config(priv, priv->ioaddr, + rq->perout.index, cfg, on, + priv->sub_second_inc, + priv->systime_flags); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + break; + default: + break; + } + + return ret; } /* structure describing a PTP hardware clock */ -static const struct ptp_clock_info stmmac_ptp_clock_ops = { +static struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac_ptp_clock", .max_adj = 62500000, .n_alarm = 0, .n_ext_ts = 0, - .n_per_out = 0, + .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */ .n_pins = 0, .pps = 0, .adjfreq = stmmac_adjust_freq, @@ -168,6 +194,16 @@ static const struct ptp_clock_info stmmac_ptp_clock_ops = { */ void stmmac_ptp_register(struct stmmac_priv *priv) { + int i; + + for (i = 0; i < priv->dma_cap.pps_out_num; i++) { + if (i >= 
STMMAC_PPS_MAX) + break; + priv->pps[i].available = true; + } + + stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + spin_lock_init(&priv->ptp_lock); priv->ptp_clock_ops = stmmac_ptp_clock_ops; -- cgit v1.2.3 From cfed0a2c98d798bed970fd450eb4d7854705b3e1 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Fri, 1 Jun 2018 07:30:49 +0530 Subject: net: ethernet: mlx4: Remove unnecessary parentheses This patch fixes the clang warning of extraneous parentheses, with the following coccinelle script. @@ identifier i; expression e; statement s; @@ if ( -(i == e) +i == e ) s Suggested-by: Lukas Bulwahn Signed-off-by: Varsha Rao Acked-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/port.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 3ef3406ff4cb..10fcc22f4590 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -614,9 +614,9 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int index_at_dup_port = -1; for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { - if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))) + if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))) index_at_port = i; - if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))) + if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))) index_at_dup_port = i; } /* check that same vlan is not in the tables at different indices */ -- cgit v1.2.3 From 76a45194e5392ef6642785a3aa083818bd8541dd Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 1 Jun 2018 16:28:34 +0100 Subject: net: aquantia: make function aq_fw2x_get_mac_permanent static The function aq_fw2x_get_mac_permanent is local to the source and does not need to be in global scope, so make it static. Cleans up sparse warning: warning: symbol 'aq_fw2x_get_mac_permanent' was not declared. Should it be static? Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 8cfce95c82fc..39cd3a27fe77 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -107,7 +107,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self) return 0; } -int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) +static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) { int err = 0; u32 h = 0U; -- cgit v1.2.3 From fe083b3f06e91cf8508f23922867e52348205825 Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Sat, 2 Jun 2018 03:46:13 +0800 Subject: net: mvpp2: mvpp2_percpu_read_relaxed() can be static Fixes: db9d7d36eecc ("net: mvpp2: Split the PPv2 driver to a dedicated directory") Signed-off-by: kbuild test robot Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 45622bffd81a..0319ed9ef8b8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -141,7 +141,7 @@ void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, writel_relaxed(data, priv->swth_base[cpu] + offset); } -u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, +static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, u32 offset) { return readl_relaxed(priv->swth_base[cpu] + offset); -- cgit v1.2.3 From fff200caf6f9179dd9a7fc67acd659e614c3f72f Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Thu, 10 May 2018 16:28:35 +0900 Subject: e1000e: Ignore TSYNCRXCTL when getting I219 clock attributes There have been multiple reports of crashes that look like kernel: RIP: 0010:[] timecounter_read+0xf/0x50 [...] kernel: Call Trace: kernel: [] e1000e_phc_gettime+0x2f/0x60 [e1000e] kernel: [] e1000e_systim_overflow_work+0x1d/0x80 [e1000e] kernel: [] process_one_work+0x155/0x440 kernel: [] worker_thread+0x116/0x4b0 kernel: [] kthread+0xd2/0xf0 kernel: [] ret_from_fork+0x3f/0x70 These can be traced back to the fact that e1000e_systim_reset() skips the timecounter_init() call if e1000e_get_base_timinca() returns -EINVAL, which leads to a null deref in timecounter_read(). Commit 83129b37ef35 ("e1000e: fix systim issues", v4.2-rc1) reworked e1000e_get_base_timinca() in such a way that it can return -EINVAL for e1000_pch_spt if the SYSCFI bit is not set in TSYNCRXCTL. Some experimentation has shown that on I219 (e1000_pch_spt, "MAC: 12") adapters, the E1000_TSYNCRXCTL_SYSCFI flag is unstable; TSYNCRXCTL reads sometimes don't have the SYSCFI bit set. Retrying the read shortly after finds the bit to be set. This was observed at boot (probe) but also link up and link down. Moreover, the phc (PTP Hardware Clock) seems to operate normally even after reads where SYSCFI=0. Therefore, remove this register read and unconditionally set the clock parameters. 
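To make that concrete, a retry-based workaround would have looked roughly like the sketch below (hypothetical, not what the patch does); since SYSCFI also reads back as 0 while the PHC is demonstrably working, the bit is not a trustworthy gate and the patch stops consulting it for e1000_pch_spt instead:

/* Hypothetical alternative that was not adopted: poll TSYNCRXCTL a few
 * times before concluding the 24MHz clock is unavailable.  Because the
 * bit is unstable, this only narrows the window instead of closing it.
 */
static bool e1000e_syscfi_seen(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int tries;

	for (tries = 0; tries < 3; tries++) {
		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
			return true;
		usleep_range(100, 200);
	}
	return false;
}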
Reported-by: Achim Mildenberger Message-Id: <20180425065243.g5mqewg5irkwgwgv@f2> Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=1075876 Fixes: 83129b37ef35 ("e1000e: fix systim issues") Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d3fef7fefea8..acf1e8b52b8e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3527,15 +3527,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) } break; case e1000_pch_spt: - if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { - /* Stable 24MHz frequency */ - incperiod = INCPERIOD_24MHZ; - incvalue = INCVALUE_24MHZ; - shift = INCVALUE_SHIFT_24MHZ; - adapter->cc.shift = shift; - break; - } - return -EINVAL; + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHZ; + incvalue = INCVALUE_24MHZ; + shift = INCVALUE_SHIFT_24MHZ; + adapter->cc.shift = shift; + break; case e1000_pch_cnp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ -- cgit v1.2.3 From 1ec2297c5cacefd9198a3cc4a27751b6a2203cf3 Mon Sep 17 00:00:00 2001 From: Joanna Yurdal Date: Wed, 16 May 2018 13:14:11 +0200 Subject: igb: Clear TSICR interrupts together with ICR Issuing "ip link set up/down" can block TSICR interrupts, what results in missing PTP Tx timestamp and no PPS pulse generation. Problem happens when the link is set up with the TSICR interrupts pending. ICR is cleared before enabling interrupts, while TSICR is not. When all TSICR interrupts are pending at this moment, time_sync interrupt will never be generated. TSICR should be cleared as well. In order to reproduce the issue: 1. Setup linux with IEEE 1588 grandmaster and PPS output enabled 2. Continue setting link up/down with random intervals between commands 3. Wait until PPS is not generated ( only one pulse is generated and PPS dies), and ptp4l complains constantly about Tx timeout. Signed-off-by: Joanna Yurdal Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 78574c06635b..20b728218d20 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2058,6 +2058,7 @@ int igb_up(struct igb_adapter *adapter) igb_assign_vector(adapter->q_vector[0], 0); /* Clear any pending interrupts. */ + rd32(E1000_TSICR); rd32(E1000_ICR); igb_irq_enable(adapter); @@ -3865,6 +3866,7 @@ static int __igb_open(struct net_device *netdev, bool resuming) napi_enable(&(adapter->q_vector[i]->napi)); /* Clear any pending interrupts. */ + rd32(E1000_TSICR); rd32(E1000_ICR); igb_irq_enable(adapter); -- cgit v1.2.3 From 06140c793db51476585ee9abf032845e221982cb Mon Sep 17 00:00:00 2001 From: Sergey Nemov Date: Fri, 18 May 2018 11:58:40 +0200 Subject: igb: Wait 10ms just once after TX queues reset Move 10ms sleep out of function resetting TX queue. Reset all the TX queues in one turn and wait for all of them just once. Use usleep_range() instead of mdelay() in order not to affect transmission on other interfaces. 
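The mdelay() to usleep_range() switch matters because this configuration path runs in process context; a generic aside (not driver code) on the difference:

/* mdelay() busy-waits, monopolising the CPU for the whole delay, and is
 * meant for atomic context where sleeping is impossible.  usleep_range()
 * actually sleeps, so the scheduler can run other work (for example TX/RX
 * processing for other interfaces) while the hardware settles.
 */
static void foo_settle_after_queue_disable(void)
{
	might_sleep();			/* only valid in process context */
	usleep_range(10000, 20000);	/* 10-20 ms, as in the patch */
}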
Signed-off-by: Sergey Nemov Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 20b728218d20..c33821d2afb3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4055,11 +4055,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, u64 tdba = ring->dma; int reg_idx = ring->reg_idx; - /* disable the queue */ - wr32(E1000_TXDCTL(reg_idx), 0); - wrfl(); - mdelay(10); - wr32(E1000_TDLEN(reg_idx), ring->count * sizeof(union e1000_adv_tx_desc)); wr32(E1000_TDBAL(reg_idx), @@ -4090,8 +4085,16 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, **/ static void igb_configure_tx(struct igb_adapter *adapter) { + struct e1000_hw *hw = &adapter->hw; int i; + /* disable the queues */ + for (i = 0; i < adapter->num_tx_queues; i++) + wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); + + wrfl(); + usleep_range(10000, 20000); + for (i = 0; i < adapter->num_tx_queues; i++) igb_configure_tx_ring(adapter, adapter->tx_ring[i]); } -- cgit v1.2.3 From 4be87727d4aebe36913d9f2a6806724cb593516f Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 22 May 2018 11:44:29 -0400 Subject: ixgbevf: Fix coexistence of malicious driver detection with XDP In the case of the VF driver it is supposed to provide a context descriptor that allows us to provide information about the header offsets inside of the frame. However in the case of XDP we don't really have any of that information since the data is minimally processed. As a result we were seeing malicious driver detection (MDD) events being triggered when the PF had that functionality enabled. To address this I have added a bit of new code that will "prime" the XDP ring by providing one context descriptor that assumes the minimal setup of an Ethernet frame which is an L2 header length of 14. With just that we can provide enough information to make the hardware happy so that we don't trigger MDD events. 
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 1 + drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 36 ++++++++++++++++++----- 2 files changed, 30 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 70c75681495f..56a1031dcc07 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -76,6 +76,7 @@ enum ixgbevf_ring_state_t { __IXGBEVF_TX_DETECT_HANG, __IXGBEVF_HANG_CHECK_ARMED, __IXGBEVF_TX_XDP_RING, + __IXGBEVF_TX_XDP_RING_PRIMED, }; #define ring_is_xdp(ring) \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 083041129539..2d5a706c3c29 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -991,24 +991,45 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, return IXGBEVF_XDP_CONSUMED; /* record the location of the first descriptor for this packet */ - tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; - tx_buffer->bytecount = len; - tx_buffer->gso_segs = 1; - tx_buffer->protocol = 0; - i = ring->next_to_use; - tx_desc = IXGBEVF_TX_DESC(ring, i); + tx_buffer = &ring->tx_buffer_info[i]; dma_unmap_len_set(tx_buffer, len, len); dma_unmap_addr_set(tx_buffer, dma, dma); tx_buffer->data = xdp->data; - tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + /* Populate minimal context descriptor that will provide for the + * fact that we are expected to process Ethernet frames. + */ + if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) { + struct ixgbe_adv_tx_context_desc *context_desc; + + set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); + + context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); + context_desc->vlan_macip_lens = + cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = + cpu_to_le32(IXGBE_TXD_CMD_DEXT | + IXGBE_ADVTXD_DTYP_CTXT); + context_desc->mss_l4len_idx = 0; + + i = 1; + } /* put descriptor type bits */ cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DCMD_IFCS; cmd_type |= len | IXGBE_TXD_CMD; + + tx_desc = IXGBEVF_TX_DESC(ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); tx_desc->read.olinfo_status = cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) | @@ -1688,6 +1709,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, sizeof(struct ixgbevf_tx_buffer) * ring->count); clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); + clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); -- cgit v1.2.3 From 7d6446db1bcb1008b84db098f4629664cd8b06a6 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Tue, 22 May 2018 09:18:46 -0700 Subject: ixgbevf: fix possible race in the reset subtask Extend the RTNL lock in ixgbevf_reset_subtask() to protect the state bits check in addition to the call to ixgbevf_reinit_locked(). This is to make sure that we get the most up-to-date values for the bits and avoid a possible race when going down. 
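The window being closed is the classic check-then-act race; a schematic with hypothetical names (__FOO_DOWN, foo_reinit_locked()) shows why the state bits have to be tested under the same RTNL lock that the down/remove paths hold:

/*   reset worker                        ifdown path (holds RTNL)
 *   ------------------------------      -------------------------
 *   test_bit(__FOO_DOWN) -> false
 *                                       set_bit(__FOO_DOWN, ...)
 *                                       frees rings, stops queues
 *   rtnl_lock();
 *   foo_reinit_locked();   <- now runs against torn-down state
 *
 * Taking RTNL before the test makes the check and the reinit a single
 * step with respect to anyone changing the state bits:
 */
static void foo_reset_subtask(struct foo_adapter *adapter)
{
	rtnl_lock();
	if (test_bit(__FOO_DOWN, &adapter->state)) {
		rtnl_unlock();
		return;
	}
	foo_reinit_locked(adapter);	/* hypothetical reinit helper */
	rtnl_unlock();
}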
Suggested-by: Zhiping du Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 2d5a706c3c29..59416eddd840 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3141,15 +3141,17 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) return; + rtnl_lock(); /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_REMOVING, &adapter->state) || - test_bit(__IXGBEVF_RESETTING, &adapter->state)) + test_bit(__IXGBEVF_RESETTING, &adapter->state)) { + rtnl_unlock(); return; + } adapter->tx_timeout_count++; - rtnl_lock(); ixgbevf_reinit_locked(adapter); rtnl_unlock(); } -- cgit v1.2.3 From e9c721837da2a73798243ded051ff0ffdbc6066b Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 23 May 2018 20:08:13 +0800 Subject: ixgbe: introduce a helper to simplify code ixgbe_dbg_reg_ops_read and ixgbe_dbg_netdev_ops_read copy-pasting the same code except for ixgbe_dbg_netdev_ops_buf/ixgbe_dbg_reg_ops_buf, so introduce a helper ixgbe_dbg_common_ops_read to remove redundant code. Signed-off-by: YueHaibing Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 57 +++++++++--------------- 1 file changed, 21 insertions(+), 36 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 55fe8114fe99..50dfb02fa34c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -10,15 +10,9 @@ static struct dentry *ixgbe_dbg_root; static char ixgbe_dbg_reg_ops_buf[256] = ""; -/** - * ixgbe_dbg_reg_ops_read - read for reg_ops datum - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) +static ssize_t ixgbe_dbg_common_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos, + char *dbg_buf) { struct ixgbe_adapter *adapter = filp->private_data; char *buf; @@ -29,8 +23,7 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, return 0; buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_reg_ops_buf); + adapter->netdev->name, dbg_buf); if (!buf) return -ENOMEM; @@ -45,6 +38,20 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, return len; } +/** + * ixgbe_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, + ixgbe_dbg_reg_ops_buf); +} + /** * ixgbe_dbg_reg_ops_write - write into reg_ops datum * @filp: the opened file @@ -121,33 +128,11 @@ static char ixgbe_dbg_netdev_ops_buf[256] = ""; 
* @count: the size of the user's buffer * @ppos: file position offset **/ -static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, - char __user *buffer, +static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - struct ixgbe_adapter *adapter = filp->private_data; - char *buf; - int len; - - /* don't allow partial reads */ - if (*ppos != 0) - return 0; - - buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_netdev_ops_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); - return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - - kfree(buf); - return len; + return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, + ixgbe_dbg_netdev_ops_buf); } /** -- cgit v1.2.3 From cc5b114dcf986bfd8e4c37bf65d1b7b1e5290ac6 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 28 May 2018 11:07:20 +0200 Subject: bpf, i40e: add meta data support Add support for XDP meta data when using build skb variant of the i40e driver. Implementation is analogous to the existing ixgbe and ixgbevf support for meta data from 366a88fe2f40 ("bpf, ixgbe: add meta data support") and be8333322eff ("ixgbevf: Add support for meta data"). With the build skb variant we get 192 bytes of extra headroom which can be used for encaps or meta data. Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Tested-by: John Fastabend Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 39 +++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 9b698c5acd05..105a26f447c0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2032,6 +2032,21 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, #if L1_CACHE_BYTES < 128 prefetch(xdp->data + L1_CACHE_BYTES); #endif + /* Note, we get here by enabling legacy-rx via: + * + * ethtool --set-priv-flags legacy-rx on + * + * In this mode, we currently get 0 extra XDP headroom as + * opposed to having legacy-rx off, where we process XDP + * packets going to stack via i40e_build_skb(). The latter + * provides us currently with 192 bytes of headroom. + * + * For i40e_construct_skb() mode it means that the + * xdp->data_meta will always point to xdp->data, since + * the helper cannot expand the head. Should this ever + * change in future for legacy-rx mode on, then lets also + * add xdp->data_meta handling here. + */ /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, @@ -2083,19 +2098,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, struct xdp_buff *xdp) { - unsigned int size = xdp->data_end - xdp->data; + unsigned int metasize = xdp->data - xdp->data_meta; #if (PAGE_SIZE < 8192) unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(I40E_SKB_PAD + size); + SKB_DATA_ALIGN(I40E_SKB_PAD + + (xdp->data_end - + xdp->data_hard_start)); #endif struct sk_buff *skb; - /* prefetch first cache line of first page */ - prefetch(xdp->data); + /* Prefetch first cache line of first page. 
If xdp->data_meta + * is unused, this points exactly as xdp->data, otherwise we + * likely have a consumer accessing first few bytes of meta + * data, and then actual data. + */ + prefetch(xdp->data_meta); #if L1_CACHE_BYTES < 128 - prefetch(xdp->data + L1_CACHE_BYTES); + prefetch(xdp->data_meta + L1_CACHE_BYTES); #endif /* build an skb around the page buffer */ skb = build_skb(xdp->data_hard_start, truesize); @@ -2103,8 +2124,10 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, I40E_SKB_PAD); - __skb_put(skb, size); + skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start)); + __skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); /* buffer is used by skb, update page_offset */ #if (PAGE_SIZE < 8192) @@ -2341,7 +2364,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; - xdp_set_data_meta_invalid(&xdp); + xdp.data_meta = xdp.data; xdp.data_hard_start = xdp.data - i40e_rx_offset(rx_ring); xdp.data_end = xdp.data + size; -- cgit v1.2.3 From 88adce4ea8f96b5191df2bea76905814cc3814e2 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 30 May 2018 09:05:12 -0700 Subject: ixgbe: fix possible race in reset subtask Similar to ixgbevf, the same possibility for race exists. Extend the RTNL lock in ixgbe_reset_subtask() to protect the state bits; this is to make sure that we get the most up-to-date values for the bits and avoid a possible race when going down. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ba3035c08572..dd8a3a037c2f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7621,17 +7621,19 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) return; + rtnl_lock(); /* If we're already down, removing or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_REMOVING, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) + test_bit(__IXGBE_RESETTING, &adapter->state)) { + rtnl_unlock(); return; + } ixgbe_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); adapter->tx_timeout_count++; - rtnl_lock(); ixgbe_reinit_locked(adapter); rtnl_unlock(); } -- cgit v1.2.3 From f07ff01406c4f67426cb6065d1a7b2c7f0434f5a Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 2 Jun 2018 21:09:34 +0300 Subject: mlxsw: spectrum_switchdev: Postpone respin on object deletion VLAN deletion notifications are emitted before the relevant change is projected to bridge configuration. Thus, like with VLAN addition, schedule SPAN respin for later. Fixes: c520bc698647 ("mlxsw: Respin SPAN on switchdev events") Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 8a15ac49cb5a..e97652c40d13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1856,7 +1856,7 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, break; } - mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); + mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); return err; } -- cgit v1.2.3 From 1fc68bb7c33fbef2216f361cfc23e41984a5edb1 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 2 Jun 2018 21:09:35 +0300 Subject: mlxsw: spectrum_span: Suppress VLAN on BRIDGE_VLAN_INFO_UNTAGGED When offloading mirroring to gretap or ip6gretap netdevices, an 802.1q bridge is one of the soft devices permissible in the underlay when resolving the packet path. After the packet path is resolved to a particular bridge egress device, flags on packet VLAN determine whether the egressed packet should be tagged. The current logic however only ever sets the VLAN tag, never suppresses it. Thus if there's a VLAN netdevice above the bridge that determines the packet VLAN, that VLAN is never unset, and mirroring is configured with VLAN tagging. Fix by setting the packet VLAN on both branches: set to zero (for unset) when BRIDGE_VLAN_INFO_UNTAGGED, copy the resolved VLAN (e.g. from bridge PVID) otherwise. Fixes: 946a11e7408e ("mlxsw: spectrum_span: Allow bridge for gretap mirror") Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index da3f7f527360..3d187d88cc7c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -191,7 +191,9 @@ mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev, if (br_vlan_get_info(edev, vid, &vinfo)) return NULL; - if (!(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)) + if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED) + *p_vid = 0; + else *p_vid = vid; return edev; } -- cgit v1.2.3 From 2a8a15526d319eed948bb38ae9f5900e2bee8565 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 30 May 2018 11:20:04 -0700 Subject: ixgbe: check ipsec ip addr against mgmt filters Make sure we don't try to offload the decryption of an incoming packet that should get delivered to the management engine. This is a corner case that will likely be very seldom seen, but could really confuse someone if they were to hit it. 
Suggested-by: Jesse Brandeburg Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 88 ++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 99b170f1efd1..e1c976271bbd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -444,6 +444,89 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, return 0; } +/** + * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters + * @xs: pointer to transformer state struct + **/ +static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u32 mfval, manc, reg; + int num_filters = 4; + bool manc_ipv4; + u32 bmcipval; + int i, j; + +#define MANC_EN_IPV4_FILTER BIT(24) +#define MFVAL_IPV4_FILTER_SHIFT 16 +#define MFVAL_IPV6_FILTER_SHIFT 24 +#define MIPAF_ARR(_m, _n) (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4)) + +#define IXGBE_BMCIP(_n) (0x5050 + ((_n) * 4)) +#define IXGBE_BMCIPVAL 0x5060 +#define BMCIP_V4 0x2 +#define BMCIP_V6 0x3 +#define BMCIP_MASK 0x3 + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER); + mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL); + bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL); + + if (xs->props.family == AF_INET) { + /* are there any IPv4 filters to check? */ + if (manc_ipv4) { + /* the 4 ipv4 filters are all in MIPAF(3, i) */ + for (i = 0; i < num_filters; i++) { + if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i))) + continue; + + reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i)); + if (reg == xs->id.daddr.a4) + return 1; + } + } + + if ((bmcipval & BMCIP_MASK) == BMCIP_V4) { + reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3)); + if (reg == xs->id.daddr.a4) + return 1; + } + + } else { + /* if there are ipv4 filters, they are in the last ipv6 slot */ + if (manc_ipv4) + num_filters = 3; + + for (i = 0; i < num_filters; i++) { + if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i))) + continue; + + for (j = 0; j < 4; j++) { + reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j)); + if (reg != xs->id.daddr.a6[j]) + break; + } + if (j == 4) /* did we match all 4 words? */ + return 1; + } + + if ((bmcipval & BMCIP_MASK) == BMCIP_V6) { + for (j = 0; j < 4; j++) { + reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j)); + if (reg != xs->id.daddr.a6[j]) + break; + } + if (j == 4) /* did we match all 4 words? */ + return 1; + } + } + + return 0; +} + /** * ixgbe_ipsec_add_sa - program device with a security association * @xs: pointer to transformer state struct @@ -465,6 +548,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } + if (ixgbe_ipsec_check_mgmt_ip(xs)) { + netdev_err(dev, "IPsec IP addr clash with mgmt filters\n"); + return -EINVAL; + } + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { struct rx_sa rsa; -- cgit v1.2.3 From 9a75fa5c166346fa3de35fe8020e43bc98a171d0 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Thu, 31 May 2018 14:12:18 -0700 Subject: ixgbe: fix broken ipsec Rx with proper cast on spi Fix up a cast problem introduced by a sparse cleanup patch. This fixes a problem where the encrypted packets were not recognized on Rx and subsequently dropped. 
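For illustration, the reason the key width matters: the kernel hashtable helpers reduce the key with hash_min(), which picks hash_32() for 32-bit keys and the 64-bit hash for wider ones, so an entry added under a (__force u64) SPI can land in a different bucket than a lookup keyed on the u32 SPI. A minimal sketch of the add/lookup pairing (the struct, table and function names are made up for this sketch, not the ixgbe code itself):

#include <linux/types.h>
#include <linux/hashtable.h>

struct demo_rx_sa {
	struct hlist_node hlist;
	__be32 spi;
};

static DEFINE_HASHTABLE(demo_sa_tbl, 10);

static void demo_add_sa(struct demo_rx_sa *rsa)
{
	/* key width must match the lookup below: u32, not u64 */
	hash_add_rcu(demo_sa_tbl, &rsa->hlist, (__force u32)rsa->spi);
}

static struct demo_rx_sa *demo_find_sa(__be32 spi)
{
	struct demo_rx_sa *rsa;

	/* caller is assumed to hold rcu_read_lock() */
	hash_for_each_possible_rcu(demo_sa_tbl, rsa, hlist, (__force u32)spi)
		if (rsa->spi == spi)
			return rsa;

	return NULL;
}
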
Fixes: 9cfbfa701b55 ("ixgbe: cleanup sparse warnings") Signed-off-by: Shannon Nelson Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index e1c976271bbd..344a1f213a5f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -663,7 +663,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) /* hash the new entry for faster search in Rx path */ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, - (__force u64)rsa.xs->id.spi); + (__force u32)rsa.xs->id.spi); } else { struct tx_sa tsa; -- cgit v1.2.3 From 232b6743e4d12d8383aaf2d632d5ed4cc377419e Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 2 Jun 2018 22:37:42 +0300 Subject: sh_eth: make sh_eth_soft_swap() work on ARM Browsing thru the driver disassembly, I noticed that ARM gcc generated no code whatsoever for sh_eth_soft_swap() while building a little-endian kernel -- apparently __LITTLE_ENDIAN__ was not being #define'd, however it got implicitly #define'd when building with the SH gcc (I could only find the explicit #define __LITTLE_ENDIAN that was #include'd when building a little-endian kernel). Luckily, the Ether controller only doing big- endian DMA is encountered on the early SH771x SoCs only and all ARM SoCs implement EDMR.DE and thus set 'sh_eth_cpu_data::hw_swap'. But anyway, we need to fix the #ifdef inside sh_eth_soft_swap() to something that would work on all architectures... Signed-off-by: Sergei Shtylyov Reviewed-by: Geert Uytterhoeven Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 5dee19b61aee..d77de7651d8f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -562,7 +562,7 @@ struct sh_eth_private { static inline void sh_eth_soft_swap(char *src, int len) { -#ifdef __LITTLE_ENDIAN__ +#ifdef __LITTLE_ENDIAN u32 *p = (u32 *)src; u32 *maxp; maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32)); -- cgit v1.2.3 From bb2fa4e847d779c0c3eba78c1abbe142134dcc3d Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 2 Jun 2018 22:38:56 +0300 Subject: sh_eth: uninline sh_eth_soft_swap() sh_eth_tsu_soft_swap() is called twice by the driver, remove *inline* and move that function from the header to the driver itself to let gcc decide whether to expand it inline or not... Signed-off-by: Sergei Shtylyov Reviewed-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 11 +++++++++++ drivers/net/ethernet/renesas/sh_eth.h | 12 ------------ 2 files changed, 11 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d9cadfb1bc4a..82b03f13243e 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -460,6 +460,17 @@ static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) return ioread32(mdp->tsu_addr + offset); } +static void sh_eth_soft_swap(char *src, int len) +{ +#ifdef __LITTLE_ENDIAN + u32 *p = (u32 *)src; + u32 *maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32)); + + for (; p < maxp; p++) + *p = swab32(*p); +#endif +} + static void sh_eth_select_mii(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index d77de7651d8f..726c55a82dd7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -560,18 +560,6 @@ struct sh_eth_private { unsigned wol_enabled:1; }; -static inline void sh_eth_soft_swap(char *src, int len) -{ -#ifdef __LITTLE_ENDIAN - u32 *p = (u32 *)src; - u32 *maxp; - maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32)); - - for (; p < maxp; p++) - *p = swab32(*p); -#endif -} - static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) { -- cgit v1.2.3 From 1100149a23a5053de4709dc4ba14ab14bd826562 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 2 Jun 2018 22:40:16 +0300 Subject: sh_eth: use DIV_ROUND_UP() in sh_eth_soft_swap() When initializing 'maxp' in sh_eth_soft_swap(), the buffer length needs to be rounded up -- that's just asking for DIV_ROUND_UP()! Signed-off-by: Sergei Shtylyov Reviewed-by: Geert Uytterhoeven Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 82b03f13243e..e9007b613f17 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -464,7 +464,7 @@ static void sh_eth_soft_swap(char *src, int len) { #ifdef __LITTLE_ENDIAN u32 *p = (u32 *)src; - u32 *maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32)); + u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32)); for (; p < maxp; p++) *p = swab32(*p); -- cgit v1.2.3 From 4e24f2dd516ed85fc8fd568ceae589ad0a07b7d5 Mon Sep 17 00:00:00 2001 From: Chas Williams <3chas3@gmail.com> Date: Sat, 2 Jun 2018 17:49:53 -0400 Subject: Allow ethtool to change tun link settings Let user space set whatever it would like to advertise for the tun interface. Preserve the existing defaults. Signed-off-by: Chas Williams <3chas3@gmail.com> Signed-off-by: David S. 
Miller --- drivers/net/tun.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index c94fffee5ea9..067fc9e7e3ed 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -81,6 +81,9 @@ #include #include +static void tun_default_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd); + /* Uncomment to enable debugging */ /* #define TUN_DEBUG 1 */ @@ -242,6 +245,7 @@ struct tun_struct { struct bpf_prog __rcu *xdp_prog; struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; + struct ethtool_link_ksettings link_ksettings; }; struct veth { @@ -2295,6 +2299,7 @@ static void tun_setup(struct net_device *dev) tun->owner = INVALID_UID; tun->group = INVALID_GID; + tun_default_link_ksettings(dev, &tun->link_ksettings); dev->ethtool_ops = &tun_ethtool_ops; dev->needs_free_netdev = true; @@ -3326,8 +3331,8 @@ static struct miscdevice tun_miscdev = { /* ethtool interface */ -static int tun_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) +static void tun_default_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); @@ -3336,6 +3341,23 @@ static int tun_get_link_ksettings(struct net_device *dev, cmd->base.port = PORT_TP; cmd->base.phy_address = 0; cmd->base.autoneg = AUTONEG_DISABLE; +} + +static int tun_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct tun_struct *tun = netdev_priv(dev); + + memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); + return 0; +} + +static int tun_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct tun_struct *tun = netdev_priv(dev); + + memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); return 0; } @@ -3406,6 +3428,7 @@ static const struct ethtool_ops tun_ethtool_ops = { .get_coalesce = tun_get_coalesce, .set_coalesce = tun_set_coalesce, .get_link_ksettings = tun_get_link_ksettings, + .set_link_ksettings = tun_set_link_ksettings, }; static int tun_queue_resize(struct tun_struct *tun) -- cgit v1.2.3 From 40434a670f66f797b3a40603a286def35337dd4f Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sun, 3 Jun 2018 10:40:15 +0800 Subject: net: chelsio: Use zeroing memory allocator instead of allocator/memset Use dma_zalloc_coherent for allocating zeroed memory and remove unnecessary memset function. Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb3/sge.c | 3 +-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 3 +-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 7 +------ 3 files changed, 3 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index e988caa797cb..20b6e1b3f5e3 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, { size_t len = nelem * elem_size; void *s = NULL; - void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); + void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); if (!p) return NULL; @@ -633,7 +633,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, } *(void **)metadata = s; } - memset(p, 0, len); return p; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 276f22357f81..7a271feec5e7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, { size_t len = nelem * elem_size + stat_size; void *s = NULL; - void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); + void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); if (!p) return NULL; @@ -708,7 +708,6 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, } if (metadata) *(void **)metadata = s; - memset(p, 0, len); return p; } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index dfce5df7538e..3007e1ac1e61 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, * Allocate the hardware ring and PCI DMA bus address space for said. */ size_t hwlen = nelem * hwsize + stat_size; - void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); + void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); if (!hwring) return NULL; @@ -776,11 +776,6 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, *(void **)swringp = swring; } - /* - * Zero out the hardware ring and return its address as our function - * value. - */ - memset(hwring, 0, hwlen); return hwring; } -- cgit v1.2.3 From 13ce3bc9c1b43566a31d3edc857a5950b2e77bd8 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sun, 3 Jun 2018 16:10:01 +0800 Subject: net: gemini: fix spelling mistake: "it" -> "is" Trivial fix to spelling mistake in gemini dev_warn message Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cortina/gemini.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index bd3f6e4d1341..ff9eb45f67f8 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -539,7 +539,7 @@ static int gmac_setup_txqs(struct net_device *netdev) } if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { - dev_warn(geth->dev, "TX queue base it not aligned\n"); + dev_warn(geth->dev, "TX queue base is not aligned\n"); kfree(skb_tab); return -ENOMEM; } @@ -680,7 +680,7 @@ static int gmac_setup_rxq(struct net_device *netdev) if (!port->rxq_ring) return -ENOMEM; if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) { - dev_warn(geth->dev, "RX queue base it not aligned\n"); + dev_warn(geth->dev, "RX queue base is not aligned\n"); return -ENOMEM; } @@ -905,7 +905,7 @@ static int geth_setup_freeq(struct gemini_ethernet *geth) if (!geth->freeq_ring) return -ENOMEM; if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) { - dev_warn(geth->dev, "queue ring base it not aligned\n"); + dev_warn(geth->dev, "queue ring base is not aligned\n"); goto err_freeq; } -- cgit v1.2.3 From 6dc5aa212321c87a79746980eb258912bcf352ba Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Sun, 3 Jun 2018 17:19:04 +0530 Subject: net: ethernet: bnx2: Remove extra parentheses The following coccinelle script removes extra parentheses to fix the clang warning of extraneous parentheses. @disable paren@ identifier i; expression e; statement s; @@ if ( -(i == e) +i == e ) s Suggested-by: Lukas Bulwahn Signed-off-by: Varsha Rao Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 9ffc4a8c5fc7..2306523778d4 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3285,7 +3285,7 @@ next_rx: sw_cons = BNX2_NEXT_RX_BD(sw_cons); sw_prod = BNX2_NEXT_RX_BD(sw_prod); - if ((rx_pkt == budget)) + if (rx_pkt == budget) break; /* Refresh hw_cons to see if there is new work */ -- cgit v1.2.3 From b8aac410b76058bc00fff9398d273566fac921cf Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Sun, 3 Jun 2018 17:19:52 +0530 Subject: net: ethernet: bnx2: Replace NULL comparison This patch fixes the checkpatch issue of NULL comparison. Replace x == NULL with !x, by using the following coccinelle script: @disable is_null@ expression e; @@ -e==NULL +!e Signed-off-by: Varsha Rao Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2.c | 42 ++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 2306523778d4..3853296d78c1 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -384,7 +384,7 @@ static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops, struct bnx2 *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - if (ops == NULL) + if (!ops) return -EINVAL; if (cp->drv_state & CNIC_DRV_STATE_REGD) @@ -755,13 +755,13 @@ bnx2_alloc_tx_mem(struct bnx2 *bp) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL); - if (txr->tx_buf_ring == NULL) + if (!txr->tx_buf_ring) return -ENOMEM; txr->tx_desc_ring = dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE, &txr->tx_desc_mapping, GFP_KERNEL); - if (txr->tx_desc_ring == NULL) + if (!txr->tx_desc_ring) return -ENOMEM; } return 0; @@ -779,7 +779,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) rxr->rx_buf_ring = vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); - if (rxr->rx_buf_ring == NULL) + if (!rxr->rx_buf_ring) return -ENOMEM; for (j = 0; j < bp->rx_max_ring; j++) { @@ -788,7 +788,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) RXBD_RING_SIZE, &rxr->rx_desc_mapping[j], GFP_KERNEL); - if (rxr->rx_desc_ring[j] == NULL) + if (!rxr->rx_desc_ring[j]) return -ENOMEM; } @@ -796,7 +796,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) if (bp->rx_pg_ring_size) { rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE * bp->rx_max_pg_ring); - if (rxr->rx_pg_ring == NULL) + if (!rxr->rx_pg_ring) return -ENOMEM; } @@ -807,7 +807,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) RXBD_RING_SIZE, &rxr->rx_pg_desc_mapping[j], GFP_KERNEL); - if (rxr->rx_pg_desc_ring[j] == NULL) + if (!rxr->rx_pg_desc_ring[j]) return -ENOMEM; } @@ -845,7 +845,7 @@ bnx2_alloc_stats_blk(struct net_device *dev) sizeof(struct statistics_block); status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, &bp->status_blk_mapping, GFP_KERNEL); - if (status_blk == NULL) + if (!status_blk) return -ENOMEM; bp->status_blk = status_blk; @@ -914,7 +914,7 @@ bnx2_alloc_mem(struct bnx2 *bp) BNX2_PAGE_SIZE, &bp->ctx_blk_mapping[i], GFP_KERNEL); - if (bp->ctx_blk[i] == NULL) + if (!bp->ctx_blk[i]) goto alloc_mem_err; } } @@ -2667,7 +2667,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp) u32 val; good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); - if (good_mbuf == NULL) + if (!good_mbuf) return -ENOMEM; BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, @@ -3225,7 +3225,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) if (len <= bp->rx_copy_thresh) { skb = netdev_alloc_skb(bp->dev, len + 6); - if (skb == NULL) { + if (!skb) { bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, sw_ring_prod); goto next_rx; @@ -4561,7 +4561,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, if (align_start || align_end) { align_buf = kmalloc(len32, GFP_KERNEL); - if (align_buf == NULL) + if (!align_buf) return -ENOMEM; if (align_start) { memcpy(align_buf, start, 4); @@ -4575,7 +4575,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { flash_buffer = kmalloc(264, GFP_KERNEL); - if (flash_buffer == NULL) { + if (!flash_buffer) { rc = -ENOMEM; goto nvram_write_end; } @@ -5440,7 +5440,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; int j; - if 
(txr->tx_buf_ring == NULL) + if (!txr->tx_buf_ring) continue; for (j = 0; j < BNX2_TX_DESC_CNT; ) { @@ -5448,7 +5448,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) struct sk_buff *skb = tx_buf->skb; int k, last; - if (skb == NULL) { + if (!skb) { j = BNX2_NEXT_TX_BD(j); continue; } @@ -5485,14 +5485,14 @@ bnx2_free_rx_skbs(struct bnx2 *bp) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; int j; - if (rxr->rx_buf_ring == NULL) + if (!rxr->rx_buf_ring) return; for (j = 0; j < bp->rx_max_ring_idx; j++) { struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j]; u8 *data = rx_buf->data; - if (data == NULL) + if (!data) continue; dma_unmap_single(&bp->pdev->dev, @@ -6826,7 +6826,7 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct bnx2 *bp = netdev_priv(dev); - if (bp->stats_blk == NULL) + if (!bp->stats_blk) return; net_stats->rx_packets = @@ -7217,7 +7217,7 @@ bnx2_get_eeprom_len(struct net_device *dev) { struct bnx2 *bp = netdev_priv(dev); - if (bp->flash_info == NULL) + if (!bp->flash_info) return 0; return (int) bp->flash_size; @@ -7678,7 +7678,7 @@ bnx2_get_ethtool_stats(struct net_device *dev, u32 *temp_stats = (u32 *) bp->temp_stats_blk; u8 *stats_len_arr = NULL; - if (hw_stats == NULL) { + if (!hw_stats) { memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); return; } @@ -8121,7 +8121,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->temp_stats_blk = kzalloc(sizeof(struct statistics_block), GFP_KERNEL); - if (bp->temp_stats_blk == NULL) { + if (!bp->temp_stats_blk) { rc = -ENOMEM; goto err_out; } -- cgit v1.2.3 From 39dbc646fd2c67ee9b71450ce172cbd714d4e7fb Mon Sep 17 00:00:00 2001 From: Yuval Bason Date: Sun, 3 Jun 2018 19:13:07 +0300 Subject: qed: Add srq core support for RoCE and iWARP This patch adds support for configuring SRQ and provides the necessary APIs for rdma upper layer driver (qedr) to enable the SRQ feature. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Yuval Bason Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5 +- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1 + drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 + drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 23 ++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 2 + drivers/net/ethernet/qlogic/qed/qed_rdma.c | 178 +++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 + drivers/net/ethernet/qlogic/qed/qed_roce.c | 17 ++- include/linux/qed/qed_rdma_if.h | 12 +- 9 files changed, 234 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 820b226d6ff8..7ed6aa0521e1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -47,6 +47,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_rdma.h" #include "qed_reg_addr.h" #include "qed_sriov.h" @@ -426,7 +427,7 @@ static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) p_mgr->srq_count = num_srqs; } -static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; @@ -2071,7 +2072,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, u32 num_cons, num_qps, num_srqs; enum protocol_type proto; - num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs); + num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { DP_NOTICE(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index a4e95869889f..758a8b4c0de8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -235,6 +235,7 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, enum protocol_type type); u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, enum protocol_type type); +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn); int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); #define QED_CTX_WORKING_MEM 0 diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index beba9308011b..b9704be53537 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -9725,6 +9725,8 @@ enum iwarp_eqe_async_opcode { IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, + IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, + IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT, MAX_IWARP_EQE_ASYNC_OPCODE }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 2a2b1018ed1d..474e6cff5b97 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -271,6 +271,8 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, p_ramrod->sq_num_pages = qp->sq_num_pages; p_ramrod->rq_num_pages = qp->rq_num_pages; + p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); @@ -3004,8 +3006,11 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, union event_ring_data *data, u8 fw_return_code) { + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; struct regpair *fw_handle = 
&data->rdma_data.async_handle; struct qed_iwarp_ep *ep = NULL; + u16 srq_offset; + u16 srq_id; u16 cid; ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, @@ -3067,6 +3072,24 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, qed_iwarp_cid_cleaned(p_hwfn, cid); break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_EMPTY, + &srq_id); + break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_LIMIT, + &srq_id); + break; case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 68c4399ffd50..b04d57ca5176 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -64,6 +64,7 @@ #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) +#define QED_RDMA_SRQS QED_ROCE_QPS static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -922,6 +923,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, if (IS_ENABLED(CONFIG_QED_RDMA)) { params->rdma_pf_params.num_qps = QED_ROCE_QPS; params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; + params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; /* divide by 3 the MRs to avoid MF ILT overflow */ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index a411f9c702a1..b8705107a93a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -259,15 +259,29 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, goto free_cid_map; } + /* Allocate bitmap for srqs */ + p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn); + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, + p_rdma_info->num_srqs, "SRQ"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate srq bitmap, rc = %d\n", rc); + goto free_real_cid_map; + } + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) rc = qed_iwarp_alloc(p_hwfn); if (rc) - goto free_cid_map; + goto free_srq_map; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); return 0; +free_srq_map: + kfree(p_rdma_info->srq_map.bitmap); +free_real_cid_map: + kfree(p_rdma_info->real_cid_map.bitmap); free_cid_map: kfree(p_rdma_info->cid_map.bitmap); free_tid_map: @@ -351,6 +365,8 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); kfree(p_rdma_info->port); kfree(p_rdma_info->dev); @@ -431,6 +447,12 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, if (cdev->rdma_max_sge) dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); + dev->max_srq_sge = 
QED_RDMA_MAX_SGE_PER_SRQ_WQE; + if (p_hwfn->cdev->rdma_max_srq_sge) { + dev->max_srq_sge = min_t(u32, + p_hwfn->cdev->rdma_max_srq_sge, + dev->max_srq_sge); + } dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; dev->max_inline = (cdev->rdma_max_inline) ? @@ -474,6 +496,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; dev->max_pkey = QED_RDMA_MAX_P_KEY; + dev->max_srq = p_hwfn->p_rdma_info->num_srqs; + dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM; dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / @@ -1628,6 +1652,155 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) return QED_LEADING_HWFN(cdev); } +static int qed_rdma_modify_srq(void *rdma_cxt, + struct qed_rdma_modify_srq_in_params *in_params) +{ + struct rdma_srq_modify_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + u16 opaque_fid; + int rc; + + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_MODIFY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_modify_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + opaque_fid = p_hwfn->hw_info.opaque_fid; + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x", + in_params->srq_id); + + return rc; +} + +static int +qed_rdma_destroy_srq(void *rdma_cxt, + struct qed_rdma_destroy_srq_in_params *in_params) +{ + struct rdma_srq_destroy_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + struct qed_bmap *bmap; + u16 opaque_fid; + int rc; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_DESTROY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_destroy_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + bmap = &p_hwfn->p_rdma_info->srq_map; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x", + in_params->srq_id); + + return rc; +} + +static int +qed_rdma_create_srq(void *rdma_cxt, + struct qed_rdma_create_srq_in_params *in_params, + struct qed_rdma_create_srq_out_params *out_params) +{ + struct rdma_srq_create_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + enum qed_cxt_elem_type elem_type; + struct qed_spq_entry *p_ent; + u16 opaque_fid, srq_id; + struct qed_bmap *bmap; + u32 returned_id; + int rc; + + bmap = &p_hwfn->p_rdma_info->srq_map; + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + if (rc) { + DP_NOTICE(p_hwfn, "failed to allocate srq id\n"); + 
return rc; + } + + elem_type = QED_ELEM_SRQ; + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); + if (rc) + goto err; + /* returned id is no greater than u16 */ + srq_id = (u16)returned_id; + opaque_fid = p_hwfn->hw_info.opaque_fid; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_CREATE_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.rdma_create_srq; + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); + p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); + p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); + p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->page_size = cpu_to_le16(in_params->page_size); + DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + out_params->srq_id = srq_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "SRQ created Id = %x\n", out_params->srq_id); + + return rc; + +err: + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + return rc; +} + bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) { bool result; @@ -1773,6 +1946,9 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { .rdma_free_tid = &qed_rdma_free_tid, .rdma_register_tid = &qed_rdma_register_tid, .rdma_deregister_tid = &qed_rdma_deregister_tid, + .rdma_create_srq = &qed_rdma_create_srq, + .rdma_modify_srq = &qed_rdma_modify_srq, + .rdma_destroy_srq = &qed_rdma_destroy_srq, .ll2_acquire_connection = &qed_ll2_acquire_connection, .ll2_establish_connection = &qed_ll2_establish_connection, .ll2_terminate_connection = &qed_ll2_terminate_connection, diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 18ec9cbd84f5..6f722ee8ee94 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -96,6 +96,8 @@ struct qed_rdma_info { u8 num_cnqs; u32 num_qps; u32 num_mrs; + u32 num_srqs; + u16 srq_id_offset; u16 queue_zone_base; u16 max_queue_zones; enum protocol_type proto; diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 6acfd43c1a4f..ee57fcdc698d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -65,6 +65,8 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, u16 echo, union event_ring_data *data, u8 fw_return_code) { + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; + if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { u16 icid = (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid); @@ -75,11 +77,18 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn, */ qed_roce_free_real_icid(p_hwfn, icid); } else { - struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events; + if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || + fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { + u16 srq_id = (u16)data->rdma_data.async_handle.lo; + + events.affiliated_event(events.context, fw_event_code, + &srq_id); + } else { + union rdma_eqe_data rdata = data->rdma_data; - events->affiliated_event(p_hwfn->p_rdma_info->events.context, - fw_event_code, - (void *)&data->rdma_data.async_handle); + 
events.affiliated_event(events.context, fw_event_code, + (void *)&rdata.async_handle); + } } return 0; diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 4dd72ba210f5..e05e320d28b7 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -485,7 +485,9 @@ enum qed_iwarp_event_type { QED_IWARP_EVENT_ACTIVE_MPA_REPLY, QED_IWARP_EVENT_LOCAL_ACCESS_ERROR, QED_IWARP_EVENT_REMOTE_OPERATION_ERROR, - QED_IWARP_EVENT_TERMINATE_RECEIVED + QED_IWARP_EVENT_TERMINATE_RECEIVED, + QED_IWARP_EVENT_SRQ_LIMIT, + QED_IWARP_EVENT_SRQ_EMPTY, }; enum qed_tcp_ip_version { @@ -646,6 +648,14 @@ struct qed_rdma_ops { int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); void (*rdma_free_tid)(void *rdma_cxt, u32 itid); + int (*rdma_create_srq)(void *rdma_cxt, + struct qed_rdma_create_srq_in_params *iparams, + struct qed_rdma_create_srq_out_params *oparams); + int (*rdma_destroy_srq)(void *rdma_cxt, + struct qed_rdma_destroy_srq_in_params *iparams); + int (*rdma_modify_srq)(void *rdma_cxt, + struct qed_rdma_modify_srq_in_params *iparams); + int (*ll2_acquire_connection)(void *rdma_cxt, struct qed_ll2_acquire_data *data); -- cgit v1.2.3 From 1f55c2865c21a49d882261cfa8d85635814ad8ab Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 4 Jun 2018 21:07:59 +0800 Subject: wan/fsl_ucc_hdlc: use dma_zalloc_coherent instead of allocator/memset Use dma_zalloc_coherent instead of dma_alloc_coherent followed by memset 0. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/wan/fsl_ucc_hdlc.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 33df76405b86..4205dfd19da3 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -270,10 +270,10 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); /* Get BD buffer */ - bd_buffer = dma_alloc_coherent(priv->dev, - (RX_BD_RING_LEN + TX_BD_RING_LEN) * - MAX_RX_BUF_LENGTH, - &bd_dma_addr, GFP_KERNEL); + bd_buffer = dma_zalloc_coherent(priv->dev, + (RX_BD_RING_LEN + TX_BD_RING_LEN) * + MAX_RX_BUF_LENGTH, + &bd_dma_addr, GFP_KERNEL); if (!bd_buffer) { dev_err(priv->dev, "Could not allocate buffer descriptors\n"); @@ -281,9 +281,6 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) goto free_tiptr; } - memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN) - * MAX_RX_BUF_LENGTH); - priv->rx_buffer = bd_buffer; priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; -- cgit v1.2.3 From ff2e351e1928b5b81a23d78e3e4effc24db007b9 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 4 Jun 2018 21:10:31 +0800 Subject: qed: use dma_zalloc_coherent instead of allocator/memset Use dma_zalloc_coherent instead of dma_alloc_coherent followed by memset 0. Signed-off-by: YueHaibing Acked-by: Tomer Tayar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 7ed6aa0521e1..b5b5ff725426 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -937,14 +937,13 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) u32 size = min_t(u32, total_size, psz); void **p_virt = &p_mngr->t2[i].p_virt; - *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - size, - &p_mngr->t2[i].p_phys, GFP_KERNEL); + *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, + size, &p_mngr->t2[i].p_phys, + GFP_KERNEL); if (!p_mngr->t2[i].p_virt) { rc = -ENOMEM; goto t2_fail; } - memset(*p_virt, 0, size); p_mngr->t2[i].size = size; total_size -= size; } -- cgit v1.2.3 From a746407af158e0c9a5f1078a3b3de11b0c77cf03 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 4 Jun 2018 17:43:21 +0300 Subject: net_failover: Use netdev_features_t instead of u32 The features mask needs to be a netdev_features_t (u64) because a u32 is not big enough. Fixes: cfc80d9a1163 ("net: Introduce net_failover driver") Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/net_failover.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 8b508e2cf29b..83f7420ddea5 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -380,7 +380,8 @@ static rx_handler_result_t net_failover_handle_frame(struct sk_buff **pskb) static void net_failover_compute_features(struct net_device *dev) { - u32 vlan_features = FAILOVER_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; + netdev_features_t vlan_features = FAILOVER_VLAN_FEATURES & + NETIF_F_ALL_FOR_ALL; netdev_features_t enc_features = FAILOVER_ENC_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | -- cgit v1.2.3 From 25ea66544bfd1d9df1b7e1502f8717e85fa1e6e6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 4 Jun 2018 17:46:01 +0300 Subject: team: use netdev_features_t instead of u32 This code was introduced in 2011 around the same time that we made netdev_features_t a u64 type. These days a u32 is not big enough to hold all the potential features. Signed-off-by: Dan Carpenter Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/team/team.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 267dcc929f6c..8863fa023500 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1004,7 +1004,8 @@ static void team_port_disable(struct team *team, static void __team_compute_features(struct team *team) { struct team_port *port; - u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; + netdev_features_t vlan_features = TEAM_VLAN_FEATURES & + NETIF_F_ALL_FOR_ALL; netdev_features_t enc_features = TEAM_ENC_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | -- cgit v1.2.3 From f0b964e5e4bb6b0c152f8064bb69a0f3d32a5096 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Mon, 4 Jun 2018 17:50:09 +0100 Subject: net: hns: Fix the process of adding broadcast addresses to tcam If the multicast mask value in device tree is configured not all 0xff, the broadcast mac will be lost from tcam table after the execution of command 'ifconfig up'. The address is appended by hns_ae_start, but will be clear later by hns_nic_set_rx_mode called in dev_open process. This patch fixed it by not use the multicast mask when add a broadcast address. Fixes: b5996f11ea54 ("net: add Hisilicon Network Subsystem basic ethernet support") Signed-off-by: Xi Wang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 23 +++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index e0bc79ea3d88..85e1d14514fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1648,6 +1648,15 @@ int hns_dsaf_rm_mac_addr( mac_entry->addr); } +static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev, + u8 port_num, u8 *mask, u8 *addr) +{ + if (MAC_IS_BROADCAST(addr)) + memset(mask, 0xff, ETH_ALEN); + else + memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN); +} + static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src) { u16 *a = (u16 *)dst; @@ -1676,7 +1685,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_tbl_tcam_key tmp_mac_key; struct dsaf_tbl_tcam_data tcam_data; u8 mc_addr[ETH_ALEN]; - u8 *mc_mask; int mskid; /*chechk mac addr */ @@ -1687,9 +1695,12 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, } ether_addr_copy(mc_addr, mac_entry->addr); - mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask; if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { + u8 mc_mask[ETH_ALEN]; + /* prepare for key data setting */ + hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num, + mc_mask, mac_entry->addr); hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask); /* config key mask */ @@ -1844,7 +1855,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key; struct dsaf_tbl_tcam_data *pmask_key = NULL; u8 mc_addr[ETH_ALEN]; - u8 *mc_mask; if (!(void *)mac_entry) { dev_err(dsaf_dev->dev, @@ -1861,14 +1871,17 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, /* always mask vlan_id field */ ether_addr_copy(mc_addr, mac_entry->addr); - mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask; if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { + u8 mc_mask[ETH_ALEN]; + /* prepare 
for key data setting */ + hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num, + mc_mask, mac_entry->addr); hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask); /* config key mask */ - hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_addr); + hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); mask_key.high.val = le32_to_cpu(mask_key.high.val); mask_key.low.val = le32_to_cpu(mask_key.low.val); -- cgit v1.2.3 From 763ea096f3cf312608317fb1027d509cfd1efc16 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 5 Jun 2018 13:55:30 +0200 Subject: i40e: remove ndo_xdp_flush call i40e_xdp_flush Remove the ndo_xdp_flush call implementation i40e_xdp_flush as no callers of ndo_xdp_flush are left. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 - drivers/net/ethernet/intel/i40e/i40e_txrx.c | 19 ------------------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 - 3 files changed, 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b5daa5c9c7de..c944bd10b03d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11883,7 +11883,6 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, - .ndo_xdp_flush = i40e_xdp_flush, }; /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5f01e4ce9c92..713995d04783 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3707,22 +3707,3 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, return n - drops; } - -/** - * i40e_xdp_flush - Implements ndo_xdp_flush - * @dev: netdev - **/ -void i40e_xdp_flush(struct net_device *dev) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - unsigned int queue_index = smp_processor_id(); - struct i40e_vsi *vsi = np->vsi; - - if (test_bit(__I40E_VSI_DOWN, vsi->state)) - return; - - if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) - return; - - i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]); -} diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 820f76db251b..bb04f6a731fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -489,7 +489,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); -void i40e_xdp_flush(struct net_device *dev); /** * i40e_get_head - Retrieve head from head writeback -- cgit v1.2.3 From af3746a779aa5106fb3d1771b8fb28522476b073 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 5 Jun 2018 13:55:35 +0200 Subject: ixgbe: remove ndo_xdp_flush call ixgbe_xdp_flush Remove the ndo_xdp_flush call implementation ixgbe_xdp_flush as no callers of ndo_xdp_flush are left. 
Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 21 --------------------- 1 file changed, 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4fd77c9067f2..ef1afb3a8a97 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10069,26 +10069,6 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, return n - drops; } -static void ixgbe_xdp_flush(struct net_device *dev) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_ring *ring; - - /* Its possible the device went down between xdp xmit and flush so - * we need to ensure device is still up. - */ - if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) - return; - - ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; - if (unlikely(!ring)) - return; - - ixgbe_xdp_ring_update_tail(ring); - - return; -} - static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -10136,7 +10116,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, - .ndo_xdp_flush = ixgbe_xdp_flush, }; /** -- cgit v1.2.3 From 16ec025aa5077144747c97a1e40635f547457a95 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 5 Jun 2018 13:55:40 +0200 Subject: virtio_net: remove ndo_xdp_flush call virtnet_xdp_flush Remove the ndo_xdp_flush call implementation virtnet_xdp_flush as no callers of ndo_xdp_flush are left. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- drivers/net/virtio_net.c | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 62ba8aadd8e6..8c5b59e79439 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -407,18 +407,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, return skb; } -static void virtnet_xdp_flush(struct net_device *dev) -{ - struct virtnet_info *vi = netdev_priv(dev); - struct send_queue *sq; - unsigned int qp; - - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - sq = &vi->sq[qp]; - - virtqueue_kick(sq->vq); -} - static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, struct send_queue *sq, struct xdp_frame *xdpf) @@ -2359,7 +2347,6 @@ static const struct net_device_ops virtnet_netdev = { #endif .ndo_bpf = virtnet_xdp, .ndo_xdp_xmit = virtnet_xdp_xmit, - .ndo_xdp_flush = virtnet_xdp_flush, .ndo_features_check = passthru_features_check, }; -- cgit v1.2.3 From 42421a56541a3b0fb6656406b657ad4686cdaaeb Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 5 Jun 2018 13:55:45 +0200 Subject: tun: remove ndo_xdp_flush call tun_xdp_flush Remove the ndo_xdp_flush call implementation tun_xdp_flush as no callers of ndo_xdp_flush are left. The tun drivers XDP_TX implementation also used tun_xdp_flush (and tun_xdp_xmit). This is easily solved by passing the XDP_XMIT_FLUSH flag to tun_xdp_xmit in tun_xdp_tx. 
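The general shape of the flag handling that makes a separate flush hook unnecessary, sketched with made-up driver helpers (the drv_* names and struct drv_tx_ring are placeholders; only XDP_XMIT_FLUSH and xdp_return_frame() are real kernel symbols here):

static int drv_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct drv_tx_ring *ring = drv_pick_tx_ring(dev);
	int i, drops = 0;

	if (unlikely(flags & ~XDP_XMIT_FLUSH))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		if (drv_enqueue_frame(ring, frames[i])) {
			xdp_return_frame(frames[i]);
			drops++;
		}
	}

	/* the caller asked for an immediate doorbell/kick, so no
	 * extra ndo_xdp_flush() round trip is needed
	 */
	if (flags & XDP_XMIT_FLUSH)
		drv_ring_kick(ring);

	return n - drops;
}

In tun this reduces tun_xdp_tx() to a single tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH) call, as the diff below shows.
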
Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- drivers/net/tun.c | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d82a05fb0594..ef09224496e8 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1347,26 +1347,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) if (unlikely(!frame)) return -EOVERFLOW; - return tun_xdp_xmit(dev, 1, &frame, 0); -} - -static void tun_xdp_flush(struct net_device *dev) -{ - struct tun_struct *tun = netdev_priv(dev); - struct tun_file *tfile; - u32 numqueues; - - rcu_read_lock(); - - numqueues = READ_ONCE(tun->numqueues); - if (!numqueues) - goto out; - - tfile = rcu_dereference(tun->tfiles[smp_processor_id() % - numqueues]); - __tun_xdp_flush_tfile(tfile); -out: - rcu_read_unlock(); + return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); } static const struct net_device_ops tap_netdev_ops = { @@ -1387,7 +1368,6 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, .ndo_bpf = tun_xdp, .ndo_xdp_xmit = tun_xdp_xmit, - .ndo_xdp_flush = tun_xdp_flush, }; static void tun_flow_init(struct tun_struct *tun) @@ -1706,7 +1686,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, alloc_frag->offset += buflen; if (tun_xdp_tx(tun->dev, &xdp)) goto err_redirect; - tun_xdp_flush(tun->dev); rcu_read_unlock(); preempt_enable(); return NULL; -- cgit v1.2.3 From 7f4828ff27d4c92280222f6427981052fd3319e9 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 2 Jun 2018 22:36:06 +0200 Subject: net: phy: add struct device_type representation of a PHY A PHY is a type of MDIO device, so let's model it as struct device_type and place PM ops, attribute groups and release callback on device type level. For this the attribute definitions have to be moved. This change allows us to get rid of the PM ops on a bus level in a second step. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 96 +++++++++++++++++++++++--------------------- 1 file changed, 50 insertions(+), 46 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 9e4ba8e80a18..bd0f339f69fd 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -346,6 +346,55 @@ static int phy_bus_match(struct device *dev, struct device_driver *drv) } } +static ssize_t +phy_id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id); +} +static DEVICE_ATTR_RO(phy_id); + +static ssize_t +phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + const char *mode = NULL; + + if (phy_is_internal(phydev)) + mode = "internal"; + else + mode = phy_modes(phydev->interface); + + return sprintf(buf, "%s\n", mode); +} +static DEVICE_ATTR_RO(phy_interface); + +static ssize_t +phy_has_fixups_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "%d\n", phydev->has_fixups); +} +static DEVICE_ATTR_RO(phy_has_fixups); + +static struct attribute *phy_dev_attrs[] = { + &dev_attr_phy_id.attr, + &dev_attr_phy_interface.attr, + &dev_attr_phy_has_fixups.attr, + NULL, +}; +ATTRIBUTE_GROUPS(phy_dev); + +static const struct device_type mdio_bus_phy_type = { + .name = "PHY", + .groups = phy_dev_groups, + .release = phy_device_release, + .pm = MDIO_BUS_PHY_PM_OPS, +}; + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) @@ -359,11 +408,10 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, return ERR_PTR(-ENOMEM); mdiodev = &dev->mdio; - mdiodev->dev.release = phy_device_release; mdiodev->dev.parent = &bus->dev; mdiodev->dev.bus = &mdio_bus_type; + mdiodev->dev.type = &mdio_bus_phy_type; mdiodev->bus = bus; - mdiodev->pm_ops = MDIO_BUS_PHY_PM_OPS; mdiodev->bus_match = phy_bus_match; mdiodev->addr = addr; mdiodev->flags = MDIO_DEVICE_FLAG_PHY; @@ -587,48 +635,6 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) } EXPORT_SYMBOL(get_phy_device); -static ssize_t -phy_id_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - - return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id); -} -static DEVICE_ATTR_RO(phy_id); - -static ssize_t -phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - const char *mode = NULL; - - if (phy_is_internal(phydev)) - mode = "internal"; - else - mode = phy_modes(phydev->interface); - - return sprintf(buf, "%s\n", mode); -} -static DEVICE_ATTR_RO(phy_interface); - -static ssize_t -phy_has_fixups_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - - return sprintf(buf, "%d\n", phydev->has_fixups); -} -static DEVICE_ATTR_RO(phy_has_fixups); - -static struct attribute *phy_dev_attrs[] = { - &dev_attr_phy_id.attr, - &dev_attr_phy_interface.attr, - &dev_attr_phy_has_fixups.attr, - NULL, -}; -ATTRIBUTE_GROUPS(phy_dev); - /** * phy_device_register - Register the phy device on the MDIO bus * @phydev: phy_device structure to be added to the MDIO bus @@ 
-651,8 +657,6 @@ int phy_device_register(struct phy_device *phydev) goto out; } - phydev->mdio.dev.groups = phy_dev_groups; - err = device_add(&phydev->mdio.dev); if (err) { pr_err("PHY %d failed to add\n", phydev->mdio.addr); -- cgit v1.2.3 From 9107c05e2e55c1c7abd02ab38f7694e0da08b643 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 2 Jun 2018 22:37:24 +0200 Subject: net: phy: remove PM ops from MDIO bus Current implementation of MDIO bus PM ops doesn't actually implement bus-specific PM ops but just calls PM ops defined on a device level what doesn't seem to be fully in line with the core PM model. When looking e.g. at __device_suspend() the PM core looks for PM ops of a device in a specific order: 1. device PM domain 2. device type 3. device class 4. device bus I think it has good reason that there's no PM ops on device level. Now that a device type representation of PHY's as special type of MDIO devices was added (only user of MDIO bus PM ops), the MDIO bus PM ops can be removed including member pm of struct mdio_device. If for some other type of MDIO device PM ops are needed, it should be modeled as struct device_type as well. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/phy/mdio_bus.c | 48 ---------------------------------------------- include/linux/mdio.h | 1 - 2 files changed, 49 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 24b5511222c8..98f4b1f706df 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -717,58 +717,10 @@ static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } -#ifdef CONFIG_PM -static int mdio_bus_suspend(struct device *dev) -{ - struct mdio_device *mdio = to_mdio_device(dev); - - if (mdio->pm_ops && mdio->pm_ops->suspend) - return mdio->pm_ops->suspend(dev); - - return 0; -} - -static int mdio_bus_resume(struct device *dev) -{ - struct mdio_device *mdio = to_mdio_device(dev); - - if (mdio->pm_ops && mdio->pm_ops->resume) - return mdio->pm_ops->resume(dev); - - return 0; -} - -static int mdio_bus_restore(struct device *dev) -{ - struct mdio_device *mdio = to_mdio_device(dev); - - if (mdio->pm_ops && mdio->pm_ops->restore) - return mdio->pm_ops->restore(dev); - - return 0; -} - -static const struct dev_pm_ops mdio_bus_pm_ops = { - .suspend = mdio_bus_suspend, - .resume = mdio_bus_resume, - .freeze = mdio_bus_suspend, - .thaw = mdio_bus_resume, - .restore = mdio_bus_restore, -}; - -#define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops) - -#else - -#define MDIO_BUS_PM_OPS NULL - -#endif /* CONFIG_PM */ - struct bus_type mdio_bus_type = { .name = "mdio_bus", .match = mdio_bus_match, .uevent = mdio_uevent, - .pm = MDIO_BUS_PM_OPS, }; EXPORT_SYMBOL(mdio_bus_type); diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 2cfffe586885..bfa7114167d7 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -29,7 +29,6 @@ enum mdio_mutex_lock_class { struct mdio_device { struct device dev; - const struct dev_pm_ops *pm_ops; struct mii_bus *bus; char modalias[MDIO_NAME_SIZE]; -- cgit v1.2.3 From 69e2ecccd0bee6b186d0d42f8093109bfca7e990 Mon Sep 17 00:00:00 2001 From: Kun Yi Date: Mon, 4 Jun 2018 13:17:04 -0700 Subject: net: phy: broadcom: Enable 125 MHz clock on LED4 pin for BCM54612E by default. BCM54612E have 4 multi-functional LED pins that can be configured through register setting; the LED4 pin can be configured to a 125MHz reference clock output by setting the spare register. 
Since the dedicated CLK125 reference clock pin is not brought out on the 48-Pin MLP, the LED4 pin is the only pin to provide such function in this package, and therefore it is beneficial to just enable the reference clock by default. Signed-off-by: Kun Yi Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/broadcom.c | 16 ++++++++++++++-- include/linux/brcmphy.h | 4 ++++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index f9c25912eb98..e86ea105c802 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -54,6 +54,8 @@ static int bcm54210e_config_init(struct phy_device *phydev) static int bcm54612e_config_init(struct phy_device *phydev) { + int reg; + /* Clear TX internal delay unless requested. */ if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) { @@ -65,8 +67,6 @@ static int bcm54612e_config_init(struct phy_device *phydev) /* Clear RX internal delay unless requested. */ if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) { - u16 reg; - reg = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC); /* Disable RXD to RXC delay (default set) */ @@ -77,6 +77,18 @@ static int bcm54612e_config_init(struct phy_device *phydev) MII_BCM54XX_AUXCTL_MISC_WREN | reg); } + /* Enable CLK125 MUX on LED4 if ref clock is enabled. */ + if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) { + int err; + + reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0); + err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0, + BCM54612E_LED4_CLK125OUT_EN | reg); + + if (err < 0) + return err; + } + return 0; } diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index b324e01ccf2d..daa9234a9baf 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -85,6 +85,7 @@ #define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */ #define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */ #define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */ +#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */ #define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */ #define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */ @@ -219,6 +220,9 @@ #define BCM54810_SHD_CLK_CTL 0x3 #define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) +/* BCM54612E Registers */ +#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) +#define BCM54612E_LED4_CLK125OUT_EN (1 << 1) /*****************************************************************************/ /* Fast Ethernet Transceiver definitions. */ -- cgit v1.2.3 From 9deb441c113ae9e761e42f78d90736d762ff49a3 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Mon, 4 Jun 2018 19:26:07 -0600 Subject: net: ipv6: Generate random IID for addresses on RAWIP devices RAWIP devices such as rmnet do not have a hardware address and instead require the kernel to generate a random IID for the IPv6 addresses. Signed-off-by: Sean Tranchetti Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 4 ++++ net/ipv6/addrconf.c | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index cb02e1a015c1..b9a7548ec6a0 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -221,6 +221,10 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->needs_free_netdev = true; rmnet_dev->ethtool_ops = &rmnet_ethtool_ops; + + /* This perm addr will be used as interface identifier by IPv6 */ + rmnet_dev->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(rmnet_dev->perm_addr); } /* Exposed API */ diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f09afc232da4..5596d87952d4 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2251,6 +2251,7 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) return addrconf_ifid_ieee1394(eui, dev); case ARPHRD_TUNNEL6: case ARPHRD_IP6GRE: + case ARPHRD_RAWIP: return addrconf_ifid_ip6tnl(eui, dev); } return -1; @@ -3286,7 +3287,8 @@ static void addrconf_dev_config(struct net_device *dev) (dev->type != ARPHRD_IP6GRE) && (dev->type != ARPHRD_IPGRE) && (dev->type != ARPHRD_TUNNEL) && - (dev->type != ARPHRD_NONE)) { + (dev->type != ARPHRD_NONE) && + (dev->type != ARPHRD_RAWIP)) { /* Alas, we support only Ethernet autoconfiguration. */ return; } -- cgit v1.2.3 From 3602207ca6582dd359308b7bd2ce08348cc0854e Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Mon, 4 Jun 2018 19:43:38 -0600 Subject: net: qualcomm: rmnet: Fix use after free while sending command ack When sending an ack to a command packet, the skb is still referenced after it is sent to the real device. Since the real device could free the skb, the device pointer would be invalid. Also, remove an unnecessary variable. Fixes: ceed73a2cf4a ("drivers: net: ethernet: qualcomm: rmnet: Initial implementation") Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 56a93df962e6..3ee8ae9b6838 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -67,7 +67,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_control_command *cmd; - int xmit_status; + struct net_device *dev = skb->dev; if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) skb_trim(skb, @@ -78,9 +78,9 @@ static void rmnet_map_send_ack(struct sk_buff *skb, cmd = RMNET_MAP_GET_CMD_START(skb); cmd->cmd_type = type & 0x03; - netif_tx_lock(skb->dev); - xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); - netif_tx_unlock(skb->dev); + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); } /* Process MAP command frame and send N/ACK message as appropriate. 
Message cmd -- cgit v1.2.3 From 6f6027a52bfbb9bbf18937d2628eaf1137a5a386 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 5 Jun 2018 02:42:45 +0000 Subject: net/mlx5e: Make function mlx5e_change_rep_mtu() static Fixes the following sparse warning: drivers/net/ethernet/mellanox/mlx5/core/en_rep.c:903:5: warning: symbol 'mlx5e_change_rep_mtu' was not declared. Should it be static? Signed-off-by: Wei Yongjun Reviewed-by: Leon Romanovsky Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 3857f22b5500..57987f6546e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -900,7 +900,7 @@ static const struct switchdev_ops mlx5e_rep_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; -int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu) +static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu) { return mlx5e_change_mtu(netdev, new_mtu, NULL); } -- cgit v1.2.3 From 47a6ca3f972f5baf06cd0c9f37d7b5268ade5363 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 5 Jun 2018 02:42:56 +0000 Subject: net/mlx5e: fix error return code in mlx5e_alloc_rq() Fix to return error code -ENOMEM from the kvzalloc_node() error handling case instead of 0, as done elsewhere in this function. Fixes: 069d11465a80 ("net/mlx5e: RX, Enhance legacy Receive Queue memory scheme") Signed-off-by: Wei Yongjun Reviewed-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 333d4ed52b94..89c96a0f708e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -566,8 +566,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, kvzalloc_node((wq_sz << rq->wqe.info.log_num_frags) * sizeof(*rq->wqe.frags), GFP_KERNEL, cpu_to_node(c->cpu)); - if (!rq->wqe.frags) + if (!rq->wqe.frags) { + err = -ENOMEM; goto err_free; + } err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu); if (err) -- cgit v1.2.3 From d52c89f120de849575f6b2e5948038f2be12ce6f Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Tue, 5 Jun 2018 13:11:16 +0300 Subject: qed*: Utilize FW 8.37.2.0 This FW contains several fixes and features. RDMA - Several modifications and fixes for Memory Windows - drop vlan and tcp timestamp from mss calculation in driver for this FW - Fix SQ completion flow when local ack timeout is infinite - Modifications in t10dif support ETH - Fix aRFS for tunneled traffic without inner IP. - Fix chip configuration which may fail under heavy traffic conditions. - Support receiving any-VNI in VXLAN and GENEVE RX classification. iSCSI / FcoE - Fix iSCSI recovery flow - Drop vlan and tcp timestamp from mss calc for fw 8.37.2.0 Misc - Several registers (split registers) won't read correctly with ethtool -d Signed-off-by: Ariel Elior Signed-off-by: Manish Rangankar Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 139 +++--- drivers/infiniband/hw/qedr/verbs.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_debug.c | 492 ++++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_dev.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 462 ++++++++++++------- drivers/net/ethernet/qlogic/qed/qed_hw.c | 20 + drivers/net/ethernet/qlogic/qed/qed_hw.h | 12 + .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 50 ++- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 13 +- drivers/net/ethernet/qlogic/qed/qed_l2.c | 3 + drivers/net/ethernet/qlogic/qed/qed_l2.h | 1 + drivers/net/ethernet/qlogic/qed/qed_rdma.c | 8 +- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 3 +- drivers/net/ethernet/qlogic/qed/qed_roce.c | 31 +- drivers/scsi/qedi/qedi_iscsi.c | 6 - include/linux/qed/common_hsi.h | 4 +- include/linux/qed/iscsi_common.h | 8 +- include/linux/qed/qed_rdma_if.h | 4 +- include/linux/qed/roce_common.h | 1 + 19 files changed, 727 insertions(+), 536 deletions(-) (limited to 'drivers/net') diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h index b816c80df50b..7e1f7021396a 100644 --- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h +++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h @@ -116,6 +116,7 @@ enum rdma_cqe_requester_status_enum { RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR, RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR, RDMA_CQE_REQ_STS_XRC_VOILATION_ERR, + RDMA_CQE_REQ_STS_SIG_ERR, MAX_RDMA_CQE_REQUESTER_STATUS_ENUM }; @@ -152,12 +153,12 @@ struct rdma_rq_sge { struct regpair addr; __le32 length; __le32 flags; -#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF -#define RDMA_RQ_SGE_L_KEY_SHIFT 0 +#define RDMA_RQ_SGE_L_KEY_LO_MASK 0x3FFFFFF +#define RDMA_RQ_SGE_L_KEY_LO_SHIFT 0 #define RDMA_RQ_SGE_NUM_SGES_MASK 0x7 #define RDMA_RQ_SGE_NUM_SGES_SHIFT 26 -#define RDMA_RQ_SGE_RESERVED0_MASK 0x7 -#define RDMA_RQ_SGE_RESERVED0_SHIFT 29 +#define RDMA_RQ_SGE_L_KEY_HI_MASK 0x7 +#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29 }; struct rdma_srq_sge { @@ -241,18 +242,39 @@ enum rdma_dif_io_direction_flg { MAX_RDMA_DIF_IO_DIRECTION_FLG }; -/* RDMA DIF Runt Result Structure */ -struct rdma_dif_runt_result { - __le16 guard_tag; - __le16 reserved[3]; +struct rdma_dif_params { + __le32 base_ref_tag; + __le16 app_tag; + __le16 app_tag_mask; + __le16 runt_crc_value; + __le16 flags; +#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_MASK 0x1 +#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_SHIFT 0 +#define RDMA_DIF_PARAMS_BLOCK_SIZE_MASK 0x1 +#define RDMA_DIF_PARAMS_BLOCK_SIZE_SHIFT 1 +#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_MASK 0x1 +#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_SHIFT 2 +#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_MASK 0x1 +#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_SHIFT 3 +#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_MASK 0x1 +#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_SHIFT 4 +#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_MASK 0x1 +#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_SHIFT 5 +#define RDMA_DIF_PARAMS_CRC_SEED_MASK 0x1 +#define RDMA_DIF_PARAMS_CRC_SEED_SHIFT 6 +#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_MASK 0x1 +#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_SHIFT 7 +#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_MASK 0x1 +#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_SHIFT 8 +#define RDMA_DIF_PARAMS_APP_ESCAPE_MASK 0x1 +#define RDMA_DIF_PARAMS_APP_ESCAPE_SHIFT 9 +#define RDMA_DIF_PARAMS_REF_ESCAPE_MASK 0x1 +#define RDMA_DIF_PARAMS_REF_ESCAPE_SHIFT 10 +#define RDMA_DIF_PARAMS_RESERVED4_MASK 0x1F +#define RDMA_DIF_PARAMS_RESERVED4_SHIFT 11 + __le32 reserved5; }; -/* Memory window type 
enumeration */ -enum rdma_mw_type { - RDMA_MW_TYPE_1, - RDMA_MW_TYPE_2A, - MAX_RDMA_MW_TYPE -}; struct rdma_sq_atomic_wqe { __le32 reserved1; @@ -334,17 +356,17 @@ struct rdma_sq_bind_wqe { #define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3 #define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1 #define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4 -#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7 -#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5 +#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 6 u8 wqe_size; u8 prev_wqe_size; u8 bind_ctrl; #define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1 #define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0 -#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1 -#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1 -#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F -#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2 +#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x7F +#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 1 u8 access_ctrl; #define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1 #define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0 @@ -363,6 +385,7 @@ struct rdma_sq_bind_wqe { __le32 length_lo; __le32 parent_l_key; __le32 reserved4; + struct rdma_dif_params dif_params; }; /* First element (16 bytes) of bind wqe */ @@ -392,10 +415,8 @@ struct rdma_sq_bind_wqe_2nd { u8 bind_ctrl; #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1 #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0 -#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1 -#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1 -#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F -#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2 +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x7F +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 1 u8 access_ctrl; #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0 @@ -416,6 +437,11 @@ struct rdma_sq_bind_wqe_2nd { __le32 reserved4; }; +/* Third element (16 bytes) of bind wqe */ +struct rdma_sq_bind_wqe_3rd { + struct rdma_dif_params dif_params; +}; + /* Structure with only the SQ WQE common * fields. 
Size is of one SQ element (16B) */ @@ -486,30 +512,6 @@ struct rdma_sq_fmr_wqe { u8 length_hi; __le32 length_lo; struct regpair pbl_addr; - __le32 dif_base_ref_tag; - __le16 dif_app_tag; - __le16 dif_app_tag_mask; - __le16 dif_runt_crc_value; - __le16 dif_flags; -#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0 -#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1 -#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5 -#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6 -#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7 -#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF -#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8 - __le32 reserved5; }; /* First element (16 bytes) of fmr wqe */ @@ -566,33 +568,6 @@ struct rdma_sq_fmr_wqe_2nd { struct regpair pbl_addr; }; -/* Third element (16 bytes) of fmr wqe */ -struct rdma_sq_fmr_wqe_3rd { - __le32 dif_base_ref_tag; - __le16 dif_app_tag; - __le16 dif_app_tag_mask; - __le16 dif_runt_crc_value; - __le16 dif_flags; -#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0 -#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5 -#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7 -#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF -#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8 - __le32 reserved5; -}; struct rdma_sq_local_inv_wqe { struct regpair reserved; @@ -637,8 +612,8 @@ struct rdma_sq_rdma_wqe { #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6 -#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7 +#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 7 u8 wqe_size; u8 prev_wqe_size; struct regpair remote_va; @@ -646,13 +621,9 @@ struct rdma_sq_rdma_wqe { u8 dif_flags; #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1 #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0 -#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1 -#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2 -#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F -#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3 - u8 reserved2[3]; +#define 
RDMA_SQ_RDMA_WQE_RESERVED2_MASK 0x7F +#define RDMA_SQ_RDMA_WQE_RESERVED2_SHIFT 1 + u8 reserved3[3]; }; /* First element (16 bytes) of rdma wqe */ diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 3f9afc02d166..e2caabb8a926 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3276,7 +3276,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge); - SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, wr->sg_list[i].lkey); RQ_SGE_SET(rqe, wr->sg_list[i].addr, @@ -3295,7 +3295,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, /* First one must include the number * of SGE in the list */ - SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0); + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0); SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1); RQ_SGE_SET(rqe, 0, 0, flags); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 39124b594a36..b9ec460dd996 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -183,16 +183,9 @@ enum platform_ids { MAX_PLATFORM_IDS }; -struct chip_platform_defs { - u8 num_ports; - u8 num_pfs; - u8 num_vfs; -}; - /* Chip constant definitions */ struct chip_defs { const char *name; - struct chip_platform_defs per_platform[MAX_PLATFORM_IDS]; }; /* Platform constant definitions */ @@ -317,6 +310,11 @@ struct phy_defs { u32 tbus_data_hi_addr; }; +/* Split type definitions */ +struct split_type_defs { + const char *name; +}; + /******************************** Constants **********************************/ #define MAX_LCIDS 320 @@ -469,21 +467,9 @@ static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { - { "bb", - {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } }, - { "ah", - {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } }, - { "reserved", - {{0, 0, 0}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } } + {"bb"}, + {"ah"}, + {"reserved"}, }; /* Storm constant definitions array */ @@ -1588,7 +1574,7 @@ static struct grc_param_defs s_grc_param_defs[] = { {{0, 0, 0}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{0, 0, 0}, 0, 1, false, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, false, 0, 0}, /* DBG_GRC_PARAM_DUMP_NIG */ {{1, 1, 1}, 0, 1, false, false, 0, 1}, @@ -1745,6 +1731,23 @@ static struct phy_defs s_phy_defs[] = { PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, }; +static struct split_type_defs s_split_type_defs[] = { + /* SPLIT_TYPE_NONE */ + {"eng"}, + + /* SPLIT_TYPE_PORT */ + {"port"}, + + /* SPLIT_TYPE_PF */ + {"pf"}, + + /* SPLIT_TYPE_PORT_PF */ + {"port"}, + + /* SPLIT_TYPE_VF */ + {"vf"} +}; + /**************************** Private Functions ******************************/ /* Reads and returns a single dword from the specified unaligned buffer */ @@ -1781,28 +1784,68 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 num_pfs = 0, max_pfs_per_port = 0; if (dev_data->initialized) return DBG_STATUS_OK; + /* Set chip */ if (QED_IS_K2(p_hwfn->cdev)) { dev_data->chip_id = CHIP_K2; dev_data->mode_enable[MODE_K2] = 1; + dev_data->num_vfs = MAX_NUM_VFS_K2; + num_pfs = MAX_NUM_PFS_K2; + max_pfs_per_port = 
MAX_NUM_PFS_K2 / 2; } else if (QED_IS_BB_B0(p_hwfn->cdev)) { dev_data->chip_id = CHIP_BB; dev_data->mode_enable[MODE_BB] = 1; + dev_data->num_vfs = MAX_NUM_VFS_BB; + num_pfs = MAX_NUM_PFS_BB; + max_pfs_per_port = MAX_NUM_PFS_BB; } else { return DBG_STATUS_UNKNOWN_CHIP; } + /* Set platofrm */ dev_data->platform_id = PLATFORM_ASIC; dev_data->mode_enable[MODE_ASIC] = 1; + /* Set port mode */ + switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { + case 0: + dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1; + break; + case 1: + dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1; + break; + case 2: + dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1; + break; + } + + /* Set 100G mode */ + if (dev_data->chip_id == CHIP_BB && + qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2) + dev_data->mode_enable[MODE_100G] = 1; + + /* Set number of ports */ + if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] || + dev_data->mode_enable[MODE_100G]) + dev_data->num_ports = 1; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2]) + dev_data->num_ports = 2; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4]) + dev_data->num_ports = 4; + + /* Set number of PFs per port */ + dev_data->num_pfs_per_port = min_t(u32, + num_pfs / dev_data->num_ports, + max_pfs_per_port); + /* Initializes the GRC parameters */ qed_dbg_grc_init_params(p_hwfn); dev_data->use_dmae = true; - dev_data->num_regs_read = 0; dev_data->initialized = 1; return DBG_STATUS_OK; @@ -1821,9 +1864,9 @@ static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn, /* Reads the FW info structure for the specified Storm from the chip, * and writes it to the specified fw_info pointer. */ -static void qed_read_fw_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 storm_id, struct fw_info *fw_info) +static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 storm_id, struct fw_info *fw_info) { struct storm_defs *storm = &s_storm_defs[storm_id]; struct fw_info_location fw_info_location; @@ -1945,45 +1988,29 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; char fw_ver_str[16] = EMPTY_FW_VERSION_STR; char fw_img_str[16] = EMPTY_FW_IMAGE_STR; struct fw_info fw_info = { {0}, {0} }; u32 offset = 0; if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { - /* Read FW image/version from PRAM in a non-reset SEMI */ - bool found = false; - u8 storm_id; - - for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; - storm_id++) { - struct storm_defs *storm = &s_storm_defs[storm_id]; - - /* Read FW version/image */ - if (dev_data->block_in_reset[storm->block_id]) - continue; - - /* Read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); - - /* Create FW version/image strings */ - if (snprintf(fw_ver_str, sizeof(fw_ver_str), - "%d_%d_%d_%d", fw_info.ver.num.major, - fw_info.ver.num.minor, fw_info.ver.num.rev, - fw_info.ver.num.eng) < 0) - DP_NOTICE(p_hwfn, - "Unexpected debug error: invalid FW version string\n"); - switch (fw_info.ver.image_id) { - case FW_IMG_MAIN: - strcpy(fw_img_str, "main"); - break; - default: - strcpy(fw_img_str, "unknown"); - break; - } - - found = true; + /* Read FW info from chip */ + qed_read_fw_info(p_hwfn, p_ptt, &fw_info); + + /* Create FW version/image strings */ + if (snprintf(fw_ver_str, sizeof(fw_ver_str), + "%d_%d_%d_%d", fw_info.ver.num.major, + fw_info.ver.num.minor, fw_info.ver.num.rev, + fw_info.ver.num.eng) < 0) + 
DP_NOTICE(p_hwfn, + "Unexpected debug error: invalid FW version string\n"); + switch (fw_info.ver.image_id) { + case FW_IMG_MAIN: + strcpy(fw_img_str, "main"); + break; + default: + strcpy(fw_img_str, "unknown"); + break; } } @@ -2412,20 +2439,21 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, /* Dumps GRC registers section header. Returns the dumped size in dwords. * The following parameters are dumped: - * - count: no. of dumped entries - * - split: split type - * - id: split ID (dumped only if split_id >= 0) + * - count: no. of dumped entries + * - split_type: split type + * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE) * - param_name: user parameter value (dumped only if param_name != NULL * and param_val != NULL). */ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, bool dump, u32 num_reg_entries, - const char *split_type, - int split_id, + enum init_split_types split_type, + u8 split_id, const char *param_name, const char *param_val) { - u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0); + u8 num_params = 2 + + (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0); u32 offset = 0; offset += qed_dump_section_hdr(dump_buf + offset, @@ -2433,8 +2461,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, offset += qed_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries); offset += qed_dump_str_param(dump_buf + offset, - dump, "split", split_type); - if (split_id >= 0) + dump, "split", + s_split_type_defs[split_type].name); + if (split_type != SPLIT_TYPE_NONE) offset += qed_dump_num_param(dump_buf + offset, dump, "id", split_id); if (param_name && param_val) @@ -2463,9 +2492,12 @@ void qed_read_regs(struct qed_hwfn *p_hwfn, static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len, bool wide_bus) + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, + u8 split_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0; if (!dump) return len; @@ -2481,8 +2513,27 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, dev_data->num_regs_read = 0; } + switch (split_type) { + case SPLIT_TYPE_PORT: + port_id = split_id; + break; + case SPLIT_TYPE_PF: + pf_id = split_id; + break; + case SPLIT_TYPE_PORT_PF: + port_id = split_id / dev_data->num_pfs_per_port; + pf_id = port_id + dev_data->num_ports * + (split_id % dev_data->num_pfs_per_port); + break; + case SPLIT_TYPE_VF: + vf_id = split_id; + break; + default: + break; + } + /* Try reading using DMAE */ - if (dev_data->use_dmae && + if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE && (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || wide_bus)) { if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), @@ -2494,7 +2545,37 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, "Failed reading from chip using DMAE, using GRC instead\n"); } - /* Read registers */ + /* If not read using DMAE, read using GRC */ + + /* Set pretend */ + if (split_type != dev_data->pretend.split_type || split_id != + dev_data->pretend.split_id) { + switch (split_type) { + case SPLIT_TYPE_PORT: + qed_port_pretend(p_hwfn, p_ptt, port_id); + break; + case SPLIT_TYPE_PF: + fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + case SPLIT_TYPE_PORT_PF: + fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid); + break; + case 
SPLIT_TYPE_VF: + fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | + (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT); + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + default: + break; + } + + dev_data->pretend.split_type = (u8)split_type; + dev_data->pretend.split_id = split_id; + } + + /* Read registers using GRC */ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len); return len; @@ -2518,7 +2599,8 @@ static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len, bool wide_bus) + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, u8 split_id) { u32 offset = 0; @@ -2526,7 +2608,8 @@ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, len, wide_bus); + dump, addr, len, wide_bus, + split_type, split_id); return offset; } @@ -2559,7 +2642,8 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, curr_len, false); + dump, addr, curr_len, false, + SPLIT_TYPE_NONE, 0); reg_offset += curr_len; addr += curr_len; @@ -2581,6 +2665,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, struct dbg_array input_regs_arr, u32 *dump_buf, bool dump, + enum init_split_types split_type, + u8 split_id, bool block_enable[MAX_BLOCK_ID], u32 *num_dumped_reg_entries) { @@ -2628,7 +2714,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, dump, addr, len, - wide_bus); + wide_bus, + split_type, split_id); (*num_dumped_reg_entries)++; } } @@ -2643,19 +2730,28 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, bool block_enable[MAX_BLOCK_ID], - const char *split_type_name, - u32 split_id, + enum init_split_types split_type, + u8 split_id, const char *param_name, const char *param_val) { + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + enum init_split_types hdr_split_type = split_type; u32 num_dumped_reg_entries, offset; + u8 hdr_split_id = split_id; + + /* In PORT_PF split type, print a port split header */ + if (split_type == SPLIT_TYPE_PORT_PF) { + hdr_split_type = SPLIT_TYPE_PORT; + hdr_split_id = split_id / dev_data->num_pfs_per_port; + } /* Calculate register dump header size (and skip it for now) */ offset = qed_grc_dump_regs_hdr(dump_buf, false, 0, - split_type_name, - split_id, param_name, param_val); + hdr_split_type, + hdr_split_id, param_name, param_val); /* Dump registers */ offset += qed_grc_dump_regs_entries(p_hwfn, @@ -2663,6 +2759,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, input_regs_arr, dump_buf + offset, dump, + split_type, + split_id, block_enable, &num_dumped_reg_entries); @@ -2671,8 +2769,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, qed_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, - split_type_name, - split_id, param_name, param_val); + hdr_split_type, + hdr_split_id, param_name, param_val); return num_dumped_reg_entries > 0 ? 
offset : 0; } @@ -2688,26 +2786,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, const char *param_name, const char *param_val) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - struct chip_platform_defs *chip_platform; u32 offset = 0, input_offset = 0; - struct chip_defs *chip; - u8 port_id, pf_id, vf_id; u16 fid; - - chip = &s_chip_defs[dev_data->chip_id]; - chip_platform = &chip->per_platform[dev_data->platform_id]; - while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; struct dbg_array curr_input_regs_arr; + enum init_split_types split_type; + u16 split_count = 0; u32 split_data_size; - u8 split_type_id; + u8 split_id; split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++]; - split_type_id = + split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = @@ -2717,99 +2810,44 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset]; curr_input_regs_arr.size_in_dwords = split_data_size; - switch (split_type_id) { + switch (split_type) { case SPLIT_TYPE_NONE: - offset += qed_grc_dump_split_data(p_hwfn, - p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, - block_enable, - "eng", - (u32)(-1), - param_name, - param_val); + split_count = 1; break; - case SPLIT_TYPE_PORT: - for (port_id = 0; port_id < chip_platform->num_ports; - port_id++) { - if (dump) - qed_port_pretend(p_hwfn, p_ptt, - port_id); - offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, block_enable, - "port", port_id, - param_name, - param_val); - } + split_count = dev_data->num_ports; break; - case SPLIT_TYPE_PF: case SPLIT_TYPE_PORT_PF: - for (pf_id = 0; pf_id < chip_platform->num_pfs; - pf_id++) { - u8 pfid_shift = - PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; - - if (dump) { - fid = pf_id << pfid_shift; - qed_fid_pretend(p_hwfn, p_ptt, fid); - } - - offset += - qed_grc_dump_split_data(p_hwfn, - p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, - block_enable, - "pf", - pf_id, - param_name, - param_val); - } + split_count = dev_data->num_ports * + dev_data->num_pfs_per_port; break; - case SPLIT_TYPE_VF: - for (vf_id = 0; vf_id < chip_platform->num_vfs; - vf_id++) { - u8 vfvalid_shift = - PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT; - u8 vfid_shift = - PXP_PRETEND_CONCRETE_FID_VFID_SHIFT; - - if (dump) { - fid = BIT(vfvalid_shift) | - (vf_id << vfid_shift); - qed_fid_pretend(p_hwfn, p_ptt, fid); - } - - offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, block_enable, - "vf", vf_id, - param_name, - param_val); - } + split_count = dev_data->num_vfs; break; - default: - break; + return 0; } + for (split_id = 0; split_id < split_count; split_id++) + offset += qed_grc_dump_split_data(p_hwfn, p_ptt, + curr_input_regs_arr, + dump_buf + offset, + dump, block_enable, + split_type, + split_id, + param_name, + param_val); + input_offset += split_data_size; } - /* Pretend to original PF */ + /* Cancel pretends (pretend to original PF) */ if (dump) { fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; qed_fid_pretend(p_hwfn, p_ptt, fid); + dev_data->pretend.split_type = SPLIT_TYPE_NONE; + dev_data->pretend.split_id = 0; } return offset; @@ -2825,7 +2863,8 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, /* Calculate header size */ offset += qed_grc_dump_regs_hdr(dump_buf, - false, 0, 
"eng", -1, NULL, NULL); + false, 0, + SPLIT_TYPE_NONE, 0, NULL, NULL); /* Write reset registers */ for (i = 0; i < MAX_DBG_RESET_REGS; i++) { @@ -2838,14 +2877,15 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, dump, BYTES_TO_DWORDS (s_reset_regs_defs[i].addr), 1, - false); + false, SPLIT_TYPE_NONE, 0); num_regs++; } /* Write header */ if (dump) qed_grc_dump_regs_hdr(dump_buf, - true, num_regs, "eng", -1, NULL, NULL); + true, num_regs, SPLIT_TYPE_NONE, + 0, NULL, NULL); return offset; } @@ -2864,7 +2904,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, /* Calculate header size */ offset += qed_grc_dump_regs_hdr(dump_buf, - false, 0, "eng", -1, NULL, NULL); + false, 0, SPLIT_TYPE_NONE, + 0, NULL, NULL); /* Write parity registers */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { @@ -2899,7 +2940,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1, false); + 1, false, + SPLIT_TYPE_NONE, 0); addr = GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS); offset += qed_grc_dump_reg_entry(p_hwfn, @@ -2907,7 +2949,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1, false); + 1, false, + SPLIT_TYPE_NONE, 0); num_reg_entries += 2; } } @@ -2929,7 +2972,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump, addr, 1, - false); + false, SPLIT_TYPE_NONE, 0); num_reg_entries++; } @@ -2937,7 +2980,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, if (dump) qed_grc_dump_regs_hdr(dump_buf, true, - num_reg_entries, "eng", -1, NULL, NULL); + num_reg_entries, SPLIT_TYPE_NONE, + 0, NULL, NULL); return offset; } @@ -2950,7 +2994,8 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn, u32 offset = 0, addr; offset += qed_grc_dump_regs_hdr(dump_buf, - dump, 2, "eng", -1, NULL, NULL); + dump, 2, SPLIT_TYPE_NONE, 0, + NULL, NULL); /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be * skipped). 
@@ -3096,7 +3141,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, len, wide_bus); + dump, addr, len, wide_bus, + SPLIT_TYPE_NONE, 0); return offset; } @@ -3235,12 +3281,12 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; struct dbg_array curr_input_mems_arr; + enum init_split_types split_type; u32 split_data_size; - u8 split_type_id; split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++]; - split_type_id = + split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = @@ -3250,20 +3296,15 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset]; curr_input_mems_arr.size_in_dwords = split_data_size; - switch (split_type_id) { - case SPLIT_TYPE_NONE: + if (split_type == SPLIT_TYPE_NONE) offset += qed_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump); - break; - - default: + else DP_NOTICE(p_hwfn, "Dumping split memories is currently not supported\n"); - break; - } input_offset += split_data_size; } @@ -3623,7 +3664,8 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, dump, addr, num_dwords_to_read, - false); + false, + SPLIT_TYPE_NONE, 0); total_dwords -= num_dwords_to_read; rss_addr++; } @@ -3682,7 +3724,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, dump, addr, len, - false); + false, SPLIT_TYPE_NONE, 0); } return offset; @@ -3731,7 +3773,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, /* Dump required non-MCP registers */ offset += qed_grc_dump_regs_hdr(dump_buf + offset, - dump, 1, "eng", -1, "block", "MCP"); + dump, 1, SPLIT_TYPE_NONE, 0, + "block", "MCP"); addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, @@ -3739,7 +3782,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump, addr, 1, - false); + false, SPLIT_TYPE_NONE, 0); /* Release MCP */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) @@ -3923,7 +3966,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, dump, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); } /* Disable block's client and debug output */ @@ -3949,28 +3993,15 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; bool parities_masked = false; - u8 i, port_mode = 0; u32 offset = 0; + u8 i; *num_dumped_dwords = 0; + dev_data->num_regs_read = 0; - if (dump) { - /* Find port mode */ - switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { - case 0: - port_mode = 1; - break; - case 1: - port_mode = 2; - break; - case 2: - port_mode = 4; - break; - } - - /* Update reset state */ + /* Update reset state */ + if (dump) qed_update_blocks_reset_state(p_hwfn, p_ptt); - } /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, @@ -3989,7 +4020,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS)); offset += qed_dump_num_param(dump_buf + offset, - dump, "num-ports", port_mode); + dump, "num-ports", dev_data->num_ports); /* Dump reset registers (dumped before taking blocks out of reset ) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) @@ -4093,10 +4124,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_phy(p_hwfn, p_ptt, 
dump_buf + offset, dump); - /* Dump static debug data */ + /* Dump static debug data (only if not during debug bus recording) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && - dev_data->bus.state == DBG_BUS_STATE_IDLE) + (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE)) offset += qed_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump); @@ -4250,7 +4281,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - reg->size, wide_bus); + reg->size, wide_bus, + SPLIT_TYPE_NONE, 0); } } @@ -4373,7 +4405,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, next_reg_offset, dump, addr, reg->entry_size, - wide_bus); + wide_bus, + SPLIT_TYPE_NONE, 0); } /* Call rule condition function. @@ -4723,7 +4756,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), - trace_data_size_dwords, false); + trace_data_size_dwords, false, + SPLIT_TYPE_NONE, 0); /* Resume MCP (only if halt succeeded) */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) @@ -4829,7 +4863,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, true, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0; } @@ -4898,7 +4933,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, true, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; } @@ -4956,7 +4992,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, true, addr, override_window_dwords, - true); + true, SPLIT_TYPE_NONE, 0); qed_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords); out: @@ -4998,7 +5034,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, continue; /* Read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); asserts = &fw_info.fw_asserts_section; @@ -5036,7 +5072,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, asserts->list_element_dword_size, - false); + false, SPLIT_TYPE_NONE, 0); } /* Dump last section */ @@ -5063,6 +5099,28 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) return DBG_STATUS_OK; } +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 storm_id; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; + + /* Skip Storm if it's in reset */ + if (dev_data->block_in_reset[storm->block_id]) + continue; + + /* Read FW info for the current Storm */ + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info); + + return true; + } + + return false; +} + /* Assign default GRC param values */ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index fde20fd9942c..b285edc8d6a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2792,7 +2792,7 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, { u32 port_mode; - port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); + port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); if (port_mode < 3) { 
p_hwfn->cdev->num_ports_in_engine = 1; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b9704be53537..bee10c1781fb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1095,14 +1095,16 @@ enum personality_type { struct pf_start_tunnel_config { u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; - u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved[3]; }; /* Ramrod data for PF start ramrod */ @@ -1145,14 +1147,17 @@ struct pf_update_tunnel_config { u8 update_rx_def_non_ucast_clss; u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; + u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; - __le16 reserved; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved1[3]; }; /* Data for port update ramrod */ @@ -2535,7 +2540,14 @@ struct idle_chk_data { u16 reserved2; }; -/* Debug Tools data (per HW function) */ +struct pretend_params { + u8 split_type; + u8 reserved; + u16 split_id; +}; + +/* Debug Tools data (per HW function) + */ struct dbg_tools_data { struct dbg_grc_data grc; struct dbg_bus_data bus; @@ -2544,8 +2556,13 @@ struct dbg_tools_data { u8 block_in_reset[88]; u8 chip_id; u8 platform_id; + u8 num_ports; + u8 num_pfs_per_port; + u8 num_vfs; u8 initialized; u8 use_dmae; + u8 reserved; + struct pretend_params pretend; u32 num_regs_read; }; @@ -2974,6 +2991,24 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr); void qed_read_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); +/** + * @brief qed_read_fw_info - Reads FW info from the chip. + * + * The FW info contains FW-related information, such as the FW version, + * FW image (main/L2B/kuku), FW timestamp, etc. + * The FW info is read from the internal RAM of the first Storm that is not in + * reset. + * + * @param p_hwfn - HW device data + * @param p_ptt - Ptt window used for writing the registers. + * @param fw_info - Out: a pointer to write the FW info into. + * + * @return true if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. + */ +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info); + /** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their * default value. @@ -4110,6 +4145,21 @@ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); */ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); +#define NUM_STORMS 6 + +/** + * @brief qed_set_rdma_error_level - Sets the RDMA assert level. + * If the severity of the error will be + * above the level, the FW will assert. + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers + * @param assert_level - An array of assert levels for each storm. + * + */ +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]); + /* Ystorm flow control mode. 
Use enum fw_flow_ctrl_mode */ #define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) #define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) @@ -4340,27 +4390,67 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) +/* Xstorm error level for assert */ +#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[47].base + ((pf_id) * IRO[47].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + +/* Ystorm error level for assert */ +#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[48].base + ((pf_id) * IRO[48].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + +/* Pstorm error level for assert */ +#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[49].base + ((pf_id) * IRO[49].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + +/* Tstorm error level for assert */ +#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[50].base + ((pf_id) * IRO[50].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + +/* Mstorm error level for assert */ +#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[51].base + ((pf_id) * IRO[51].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + +/* Ustorm error level for assert */ +#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[52].base + ((pf_id) * IRO[52].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + /* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) /* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[48].base + ((roce_pf_id) * IRO[48].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size) + (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) /* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[49].base + ((roce_pf_id) * IRO[49].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size) + (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + +/* RoCE Error Statistics */ +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) /* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[50].base + ((roce_pf_id) * IRO[50].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) -static const struct iro iro_arr[51] = { +/* RoCE CQEs Statistics */ +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + +static const struct iro iro_arr[59] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@ -4408,10 +4498,18 @@ static const struct iro iro_arr[51] = { {0x10768, 0x20, 0x0, 0x0, 0x20}, {0x2d48, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, + {0xc748, 0x8, 0x0, 0x0, 0x1}, + {0xa128, 0x8, 0x0, 0x0, 0x1}, + {0x10f00, 0x8, 0x0, 0x0, 0x1}, + {0xf030, 0x8, 0x0, 0x0, 0x1}, + {0x13028, 0x8, 0x0, 0x0, 0x1}, + {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, - {0xed90, 
0x10, 0x0, 0x0, 0x10}, - {0xa3a0, 0x10, 0x0, 0x0, 0x10}, + {0xed90, 0x28, 0x0, 0x0, 0x28}, + {0xa520, 0x18, 0x0, 0x0, 0x18}, + {0xa6a0, 0x8, 0x0, 0x0, 0x8}, {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0x13c50, 0x18, 0x0, 0x0, 0x18}, }; /* Runtime array offsets */ @@ -4797,147 +4895,147 @@ static const struct iro iro_arr[51] = { #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 #define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938 -#define 
PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974 -#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977 -#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980 -#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983 -#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986 -#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989 -#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992 -#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995 -#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998 -#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000 -#define 
PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001 -#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004 -#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007 -#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010 -#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013 -#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016 -#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019 -#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022 - -#define RUNTIME_ARRAY_SIZE 43023 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 
42924 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973 +#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976 +#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979 +#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982 +#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985 +#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986 +#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988 +#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991 +#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994 +#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997 +#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000 +#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003 +#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006 +#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009 +#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012 +#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015 +#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018 +#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021 + +#define RUNTIME_ARRAY_SIZE 43022 + /* Init Callbacks */ #define DMAE_READY_CB 0 @@ -5694,8 +5792,10 @@ struct eth_vport_rx_mode { #define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 -#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF -#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7 }; /* Command for setting tpa parameters */ @@ -6756,7 +6856,7 @@ struct e4_ystorm_rdma_task_ag_ctx { #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; - __le32 mw_cnt; + __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; u8 ctx_upd_seq; __le16 dif_flags; @@ -6812,7 +6912,7 @@ struct e4_mstorm_rdma_task_ag_ctx { #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; - __le32 mw_cnt; + __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; u8 ctx_upd_seq; __le16 dif_flags; @@ -7075,8 +7175,7 @@ struct rdma_register_tid_ramrod_data { struct regpair va; struct regpair pbl_base; struct regpair dif_error_addr; - struct regpair dif_runt_addr; - __le32 reserved4[2]; + __le32 reserved4[4]; }; /* rdma resize cq output params */ @@ -7144,8 +7243,7 @@ struct rdma_srq_modify_ramrod_data { enum rdma_tid_type { RDMA_TID_REGISTERED_MR, RDMA_TID_FMR, - RDMA_TID_MW_TYPE1, - RDMA_TID_MW_TYPE2A, + 
RDMA_TID_MW, MAX_RDMA_TID_TYPE }; @@ -7681,6 +7779,16 @@ struct e4_roce_conn_context { struct ustorm_roce_conn_st_ctx ustorm_st_context; }; +/* roce cqes statistics */ +struct roce_cqe_stats { + __le32 req_cqe_error; + __le32 req_remote_access_errors; + __le32 req_remote_invalid_request; + __le32 resp_cqe_error; + __le32 resp_local_length_error; + __le32 reserved; +}; + /* roce create qp requester ramrod data */ struct roce_create_qp_req_ramrod_data { __le16 flags; @@ -7798,8 +7906,8 @@ struct roce_dcqcn_sent_stats { /* RoCE destroy qp requester output params */ struct roce_destroy_qp_req_output_params { - __le32 num_bound_mw; __le32 cq_prod; + __le32 reserved; }; /* RoCE destroy qp requester ramrod data */ @@ -7809,8 +7917,8 @@ struct roce_destroy_qp_req_ramrod_data { /* RoCE destroy qp responder output params */ struct roce_destroy_qp_resp_output_params { - __le32 num_invalidated_mw; __le32 cq_prod; + __le32 reserved; }; /* RoCE destroy qp responder ramrod data */ @@ -7818,16 +7926,27 @@ struct roce_destroy_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* roce error statistics */ +struct roce_error_stats { + __le32 resp_remote_access_errors; + __le32 reserved; +}; + /* roce special events statistics */ struct roce_events_stats { - __le16 silent_drops; - __le16 rnr_naks_sent; + __le32 silent_drops; + __le32 rnr_naks_sent; __le32 retransmit_count; __le32 icrc_error_count; - __le32 reserved; + __le32 implied_nak_seq_err; + __le32 duplicate_request; + __le32 local_ack_timeout_err; + __le32 out_of_sequence; + __le32 packet_seq_err; + __le32 rnr_nak_retry_err; }; -/* ROCE slow path EQ cmd IDs */ +/* roce slow path EQ cmd IDs */ enum roce_event_opcode { ROCE_EVENT_CREATE_QP = 11, ROCE_EVENT_MODIFY_QP, @@ -7845,6 +7964,9 @@ struct roce_init_func_params { u8 cnp_dscp; u8 reserved; __le32 cnp_send_timeout; + __le16 rl_offset; + u8 rl_count_log; + u8 reserved1[5]; }; /* roce func init ramrod data */ @@ -8532,7 +8654,7 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { __le16 rq_prod; __le16 conn_dpi; __le16 irq_cons; - __le32 num_invlidated_mw; + __le32 reg9; __le32 reg10; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index fca2dbd93ad9..70504dcf4087 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -360,6 +360,26 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) *(u32 *)&p_ptt->pxp.pretend); } +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + p_ptt->pxp.pretend.control = cpu_to_le16(control); + p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid); + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid) { u32 concrete_fid = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 8db2839a8ec8..505e94db939d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -244,6 +244,18 @@ void 
qed_port_pretend(struct qed_hwfn *p_hwfn, void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief qed_port_fid_pretend - pretend to another port and another function + * when accessing the ptt window + * + * @param p_hwfn + * @param p_ptt + * @param port_id - the port to pretend to + * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + */ +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid); + /** * @brief qed_vfid_to_concrete - build a concrete FID for a * given VF ID diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 1365da7c8900..d845badf9b90 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1245,7 +1245,7 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool udp, bool ipv4, bool ipv6, enum gft_profile_type profile_type) { - u32 reg_val, cam_line, ram_line_lo, ram_line_hi; + u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, @@ -1314,6 +1314,9 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, ram_line_lo = 0; ram_line_hi = 0; + /* Search no IP as GFT */ + search_non_ip_as_gft = 0; + /* Tunnel type */ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); @@ -1337,8 +1340,13 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); + + /* Allow tunneled traffic without inner IP */ + search_non_ip_as_gft = 1; } + qed_wr(p_hwfn, + p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, @@ -1509,3 +1517,43 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } + +static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) +{ + switch (storm_id) { + case 0: + return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 1: + return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 2: + return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 3: + return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 4: + return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 5: + return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + + default: + return 0; + } +} + +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]) +{ + u8 storm_id; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id); + + qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]); + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 474e6cff5b97..90a2b53096e2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1159,7 
+1159,6 @@ int qed_iwarp_connect(void *rdma_cxt, struct qed_iwarp_info *iwarp_info; struct qed_iwarp_ep *ep; u8 mpa_data_size = 0; - u8 ts_hdr_size = 0; u32 cid; int rc; @@ -1218,10 +1217,7 @@ int qed_iwarp_connect(void *rdma_cxt, iparams->cm_info.private_data, iparams->cm_info.private_data_len); - if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) - ts_hdr_size = TIMESTAMP_HEADER_SIZE; - - ep->mss = iparams->mss - ts_hdr_size; + ep->mss = iparams->mss; ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); ep->event_cb = iparams->event_cb; @@ -2337,7 +2333,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) u8 local_mac_addr[ETH_ALEN]; struct qed_iwarp_ep *ep; int tcp_start_offset; - u8 ts_hdr_size = 0; u8 ll2_syn_handle; int payload_len; u32 hdr_size; @@ -2415,11 +2410,7 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); - if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) - ts_hdr_size = TIMESTAMP_HEADER_SIZE; - - hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) + - ts_hdr_size; + hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60); ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index eed4725761f5..1f6ac848109d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -586,6 +586,9 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); + SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, + !!(accept_filter & QED_ACCEPT_ANY_VNI)); + p_ramrod->rx_mode.state = cpu_to_le16(state); DP_VERBOSE(p_hwfn, QED_MSG_SP, "p_ramrod->rx_mode.state = 0x%x\n", state); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index c4030e949cce..806a8da257e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -183,6 +183,7 @@ struct qed_filter_accept_flags { #define QED_ACCEPT_MCAST_MATCHED 0x08 #define QED_ACCEPT_MCAST_UNMATCHED 0x10 #define QED_ACCEPT_BCAST 0x20 +#define QED_ACCEPT_ANY_VNI 0x40 }; struct qed_arfs_config_params { diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index b8705107a93a..101d677114f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1508,11 +1508,8 @@ qed_rdma_register_tid(void *rdma_cxt, case QED_RDMA_TID_FMR: tid_type = RDMA_TID_FMR; break; - case QED_RDMA_TID_MW_TYPE1: - tid_type = RDMA_TID_MW_TYPE1; - break; - case QED_RDMA_TID_MW_TYPE2A: - tid_type = RDMA_TID_MW_TYPE2A; + case QED_RDMA_TID_MW: + tid_type = RDMA_TID_MW; break; default: rc = -EINVAL; @@ -1544,7 +1541,6 @@ qed_rdma_register_tid(void *rdma_cxt, RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); DMA_REGPAIR_LE(p_ramrod->dif_error_addr, params->dif_error_addr); - DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr); } rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index f7122059b6b5..d8ad2dcad8d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -178,7 +178,7 @@ 0x008c80UL #define MCP_REG_SCRATCH \ 
0xe20000UL -#define CNIG_REG_NW_PORT_MODE_BB_B0 \ +#define CNIG_REG_NW_PORT_MODE_BB \ 0x218200UL #define MISCS_REG_CHIP_NUM \ 0x00976cUL @@ -1621,6 +1621,7 @@ #define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1 #define PRS_REG_SEARCH_GFT 0x1f11bcUL +#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL #define PRS_REG_CM_HDR_GFT 0x1f11c8UL #define PRS_REG_GFT_CAM 0x1f1100UL #define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index ee57fcdc698d..b5ce1581645f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -681,7 +681,6 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, - u32 *num_invalidated_mw, u32 *cq_prod) { struct roce_destroy_qp_resp_output_params *p_ramrod_res; @@ -692,8 +691,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); - - *num_invalidated_mw = 0; *cq_prod = qp->cq_prod; if (!qp->resp_offloaded) { @@ -742,7 +739,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, if (rc) goto err; - *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw); *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); qp->cq_prod = *cq_prod; @@ -764,8 +760,7 @@ err: } static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, - struct qed_rdma_qp *qp, - u32 *num_bound_mw) + struct qed_rdma_qp *qp) { struct roce_destroy_qp_req_output_params *p_ramrod_res; struct roce_destroy_qp_req_ramrod_data *p_ramrod; @@ -807,7 +802,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, if (rc) goto err; - *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw); /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, @@ -968,8 +962,6 @@ err_resp: int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { - u32 num_invalidated_mw = 0; - u32 num_bound_mw = 0; u32 cq_prod; int rc; @@ -984,22 +976,14 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw, &cq_prod); if (rc) return rc; /* Send destroy requester ramrod */ - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, - &num_bound_mw); + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); if (rc) return rc; - - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return -EINVAL; - } } return 0; @@ -1010,7 +994,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, enum qed_roce_qp_state prev_state, struct qed_rdma_modify_qp_in_params *params) { - u32 num_invalidated_mw = 0, num_bound_mw = 0; int rc = 0; /* Perform additional operations according to the current state and the @@ -1090,7 +1073,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, /* Send destroy responder ramrod */ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw, &cq_prod); if (rc) @@ -1098,14 +1080,7 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, qp->cq_prod = cq_prod; - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, - &num_bound_mw); - - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return 
-EINVAL; - } + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); } else { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); } diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index ff21aa1fd939..2f0a4f2c5ff8 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -471,14 +471,8 @@ static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en) else hdrs += IPV4_HDR_LEN; - if (vlan_en) - hdrs += VLAN_LEN; - mss = pmtu - hdrs; - if (tcp_ts_en) - mss -= TCP_OPTION_LEN; - if (!mss) mss = DEF_MSS; diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 13c8ab171437..0081fa6d1268 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -109,8 +109,8 @@ #define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 33 -#define FW_REVISION_VERSION 11 +#define FW_MINOR_VERSION 37 +#define FW_REVISION_VERSION 2 #define FW_ENGINEERING_VERSION 0 /***********************/ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 938df614cb6a..b34c573f2b30 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -799,8 +799,8 @@ struct e4_mstorm_iscsi_task_ag_ctx { #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 @@ -849,8 +849,8 @@ struct e4_ustorm_iscsi_task_ag_ctx { #define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 u8 flags1; diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index e05e320d28b7..df4d13f7e191 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -65,8 +65,7 @@ enum qed_roce_qp_state { enum qed_rdma_tid_type { QED_RDMA_TID_REGISTERED_MR, QED_RDMA_TID_FMR, - QED_RDMA_TID_MW_TYPE1, - QED_RDMA_TID_MW_TYPE2A + QED_RDMA_TID_MW }; struct qed_rdma_events { @@ -280,7 +279,6 @@ struct qed_rdma_register_tid_in_params { bool dif_enabled; u64 dif_error_addr; - u64 dif_runt_addr; }; struct qed_rdma_create_cq_in_params { diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 193bcef302e1..473fba76aa77 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -43,6 +43,7 @@ #define ROCE_MAX_QPS (32 * 1024) #define ROCE_DCQCN_NP_MAX_QPS (64) #define ROCE_DCQCN_RP_MAX_QPS (64) +#define ROCE_LKEY_MW_DIF_EN_BIT (28) /* Affiliated asynchronous events / errors enumeration */ enum roce_async_events_type { -- cgit v1.2.3 From 4f416db98beaef2f047709e4693b0a4c64d9bbf8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 
5 Jun 2018 13:38:21 +0200 Subject: net: hns3: remove unused hclgevf_cfg_func_mta_filter The last patch apparently added a complete replacement for this function, but left the old one in place, which now causes a harmless warning: drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c:731:12: 'hclgevf_cfg_func_mta_filter' defined but not used I assume it can be removed. Fixes: 3a678b5806e6 ("net: hns3: Optimize the VF's process of updating multicast MAC") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index dd8e8e6718dc..bc8a5760d959 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -728,17 +728,6 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) } } -static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en) -{ - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - u8 msg[2] = {0}; - - msg[0] = en; - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, - msg, 1, false, NULL, 0); -} - static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) { u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; -- cgit v1.2.3 From ac0fc8a1bbcbe03ee67278afded105c05eb3535e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 5 Jun 2018 08:14:09 -0700 Subject: devlink: Add extack to reload and port_{un, }split operations Add extack argument to reload, port_split and port_unsplit operations. Signed-off-by: David Ahern Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 9 ++++++--- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 5 +++-- drivers/net/netdevsim/devlink.c | 3 ++- include/net/devlink.h | 7 ++++--- net/core/devlink.c | 18 ++++++++++-------- 5 files changed, 25 insertions(+), 17 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 8a766fe28fa0..7ed38d80bc08 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -770,7 +770,8 @@ static void mlxsw_core_driver_put(const char *kind) static int mlxsw_devlink_port_split(struct devlink *devlink, unsigned int port_index, - unsigned int count) + unsigned int count, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); @@ -782,7 +783,8 @@ static int mlxsw_devlink_port_split(struct devlink *devlink, } static int mlxsw_devlink_port_unsplit(struct devlink *devlink, - unsigned int port_index) + unsigned int port_index, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); @@ -963,7 +965,8 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, pool_type, p_cur, p_max); } -static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink) +static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); int err; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 71c2edd83031..db463e20a876 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -92,7 +92,7 @@ nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes) static int nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, - unsigned int count) + unsigned int count, struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; @@ -123,7 +123,8 @@ out: } static int -nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index) +nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c index bef7db5d129a..e8366cf372ff 100644 --- a/drivers/net/netdevsim/devlink.c +++ b/drivers/net/netdevsim/devlink.c @@ -147,7 +147,8 @@ out: return err; } -static int nsim_devlink_reload(struct devlink *devlink) +static int nsim_devlink_reload(struct devlink *devlink, + struct netlink_ext_ack *extack) { enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, diff --git a/include/net/devlink.h b/include/net/devlink.h index 9686a1aa4ec9..e336ea9c73df 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -296,12 +296,13 @@ struct devlink_resource { #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 struct devlink_ops { - int (*reload)(struct devlink *devlink); + int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, enum devlink_port_type port_type); int (*port_split)(struct devlink *devlink, unsigned int port_index, - unsigned int count); - int (*port_unsplit)(struct devlink *devlink, unsigned int port_index); + unsigned int count, 
struct netlink_ext_ack *extack); + int (*port_unsplit)(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack); int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); diff --git a/net/core/devlink.c b/net/core/devlink.c index f75ee022e6b2..22099705cc41 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -702,12 +702,13 @@ static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, return 0; } -static int devlink_port_split(struct devlink *devlink, - u32 port_index, u32 count) +static int devlink_port_split(struct devlink *devlink, u32 port_index, + u32 count, struct netlink_ext_ack *extack) { if (devlink->ops && devlink->ops->port_split) - return devlink->ops->port_split(devlink, port_index, count); + return devlink->ops->port_split(devlink, port_index, count, + extack); return -EOPNOTSUPP; } @@ -724,14 +725,15 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]); count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]); - return devlink_port_split(devlink, port_index, count); + return devlink_port_split(devlink, port_index, count, info->extack); } -static int devlink_port_unsplit(struct devlink *devlink, u32 port_index) +static int devlink_port_unsplit(struct devlink *devlink, u32 port_index, + struct netlink_ext_ack *extack) { if (devlink->ops && devlink->ops->port_unsplit) - return devlink->ops->port_unsplit(devlink, port_index); + return devlink->ops->port_unsplit(devlink, port_index, extack); return -EOPNOTSUPP; } @@ -745,7 +747,7 @@ static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb, return -EINVAL; port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]); - return devlink_port_unsplit(devlink, port_index); + return devlink_port_unsplit(devlink, port_index, info->extack); } static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink, @@ -2599,7 +2601,7 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed"); return err; } - return devlink->ops->reload(devlink); + return devlink->ops->reload(devlink, info->extack); } static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { -- cgit v1.2.3 From 7fa76d777ec53eeece1546b737a3b93b37639575 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 5 Jun 2018 08:14:10 -0700 Subject: netdevsim: Add extack error message for devlink reload devlink reset command can fail if a FIB resource limit is set to a value lower than the current occupancy. Return a proper message indicating the reason for the failure. 
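To make the mechanism concrete, here is a minimal, hypothetical sketch of a devlink reload callback that reports its failure reason through the extack argument added in the previous patch; the function name and the numeric values are placeholders for illustration only, not the actual netdevsim code (the real change follows in the diff below).

#include <linux/netlink.h>
#include <net/devlink.h>

/* Hypothetical reload op: reject the reload and attach a human-readable
 * reason that "devlink dev reload" will print as "Error: ...".
 */
static int example_devlink_reload(struct devlink *devlink,
				  struct netlink_ext_ack *extack)
{
	u64 new_max = 40;	/* assumed configured resource limit */
	u64 occupancy = 43;	/* assumed current FIB occupancy */

	if (new_max < occupancy) {
		NL_SET_ERR_MSG_MOD(extack, "New size is less than current occupancy");
		return -EINVAL;
	}
	return 0;
}

With a callback like this in place, the failure in the transcript below carries the message back to userspace instead of only an errno.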
$ devlink resource sh netdevsim/netdevsim0 netdevsim/netdevsim0: name IPv4 size unlimited unit entry size_min 0 size_max unlimited size_gran 1 dpipe_tables none resources: name fib size unlimited occ 43 unit entry size_min 0 size_max unlimited size_gran 1 dpipe_tables none name fib-rules size unlimited occ 4 unit entry size_min 0 size_max unlimited size_gran 1 dpipe_tables none name IPv6 size unlimited unit entry size_min 0 size_max unlimited size_gran 1 dpipe_tables none resources: name fib size unlimited occ 54 unit entry size_min 0 size_max unlimited size_gran 1 dpipe_tables none name fib-rules size unlimited occ 3 unit entry size_min 0 size_max unlimited size_gran 1 dpipe_tables none $ devlink resource set netdevsim/netdevsim0 path /IPv4/fib size 40 $ devlink dev reload netdevsim/netdevsim0 Error: netdevsim: New size is less than current occupancy. devlink answers: Invalid argument Signed-off-by: David Ahern Acked-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/netdevsim/devlink.c | 4 ++-- drivers/net/netdevsim/fib.c | 9 ++++++--- drivers/net/netdevsim/netdevsim.h | 3 ++- 3 files changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c index e8366cf372ff..ba663e5af168 100644 --- a/drivers/net/netdevsim/devlink.c +++ b/drivers/net/netdevsim/devlink.c @@ -163,7 +163,7 @@ static int nsim_devlink_reload(struct devlink *devlink, err = devlink_resource_size_get(devlink, res_ids[i], &val); if (!err) { - err = nsim_fib_set_max(net, res_ids[i], val); + err = nsim_fib_set_max(net, res_ids[i], val, extack); if (err) return err; } @@ -181,7 +181,7 @@ static void nsim_devlink_net_reset(struct net *net) int i; for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { - if (nsim_fib_set_max(net, res_ids[i], (u64)-1)) { + if (nsim_fib_set_max(net, res_ids[i], (u64)-1, NULL)) { pr_err("Failed to reset limit for resource %u\n", res_ids[i]); } diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 9bfe9e151e13..f61d094746c0 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -64,7 +64,8 @@ u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max) return max ? 
entry->max : entry->num; } -int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val) +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, + struct netlink_ext_ack *extack) { struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; @@ -90,10 +91,12 @@ int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val) /* not allowing a new max to be less than curren occupancy * --> no means of evicting entries */ - if (val < entry->num) + if (val < entry->num) { + NL_SET_ERR_MSG_MOD(extack, "New size is less than current occupancy"); err = -EINVAL; - else + } else { entry->max = val; + } return err; } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 3a8581af3b85..8ca50b72c328 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -126,7 +126,8 @@ void nsim_devlink_exit(void); int nsim_fib_init(void); void nsim_fib_exit(void); u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max); -int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val); +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, + struct netlink_ext_ack *extack); #else static inline int nsim_devlink_setup(struct netdevsim *ns) { -- cgit v1.2.3 From 3fcc773be62a9c42dc9a5c1108da298fb9f66cfa Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 5 Jun 2018 08:14:11 -0700 Subject: mlxsw: Add extack messages for port_{un, }split failures Return messages in extack for port split/unsplit errors. e.g., $ devlink port split swp1s1 count 4 Error: mlxsw_spectrum: Port cannot be split further. devlink answers: Invalid argument $ devlink port unsplit swp4 Error: mlxsw_spectrum: Port was not split. devlink answers: Invalid argument Signed-off-by: David Ahern Reviewed-by: Ido Schimmel Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 14 ++++++++++---- drivers/net/ethernet/mellanox/mlxsw/core.h | 5 +++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 15 ++++++++++++--- 3 files changed, 25 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 7ed38d80bc08..f9c724752a32 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -775,11 +775,14 @@ static int mlxsw_devlink_port_split(struct devlink *devlink, { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= mlxsw_core->max_ports) + if (port_index >= mlxsw_core->max_ports) { + NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports"); return -EINVAL; + } if (!mlxsw_core->driver->port_split) return -EOPNOTSUPP; - return mlxsw_core->driver->port_split(mlxsw_core, port_index, count); + return mlxsw_core->driver->port_split(mlxsw_core, port_index, count, + extack); } static int mlxsw_devlink_port_unsplit(struct devlink *devlink, @@ -788,11 +791,14 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink, { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= mlxsw_core->max_ports) + if (port_index >= mlxsw_core->max_ports) { + NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports"); return -EINVAL; + } if (!mlxsw_core->driver->port_unsplit) return -EOPNOTSUPP; - return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index); + return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index, + extack); } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 4a8d4c7f89d9..552cfa29c2f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -274,8 +274,9 @@ struct mlxsw_driver { int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port, enum devlink_port_type new_type); int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, - unsigned int count); - int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port); + unsigned int count, struct netlink_ext_ack *extack); + int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port, + struct netlink_ext_ack *extack); int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index fc39f22e5c70..968b88af2ef5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3092,7 +3092,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, - unsigned int count) + unsigned int count, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; @@ -3104,6 +3105,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); + NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); return -EINVAL; } @@ -3112,11 +3114,13 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, if (count != 2 && count != 4) { netdev_err(mlxsw_sp_port->dev, "Port can only be 
split into 2 or 4 ports\n"); + NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports"); return -EINVAL; } if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); + NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); return -EINVAL; } @@ -3125,6 +3129,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, base_port = local_port; if (mlxsw_sp->ports[base_port + 1]) { netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); return -EINVAL; } } else { @@ -3132,6 +3137,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, if (mlxsw_sp->ports[base_port + 1] || mlxsw_sp->ports[base_port + 3]) { netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); return -EINVAL; } } @@ -3153,7 +3159,8 @@ err_port_split_create: return err; } -static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) +static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; @@ -3165,11 +3172,13 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); + NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); return -EINVAL; } if (!mlxsw_sp_port->split) { - netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); + netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); + NL_SET_ERR_MSG_MOD(extack, "Port was not split"); return -EINVAL; } -- cgit v1.2.3 From 4016a7f15efc9189f0ce18025fb3306a8b5f9195 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan Date: Tue, 5 Jun 2018 10:14:57 -0700 Subject: enic: fix UDP rss bits In commit 48398b6e7065 ("enic: set UDP rss flag") the driver needed to set only a single bit to enable UDP RSS. This has changed to two bits: one for UDP IPv4 and another for UDP IPv6. The hardware that supports this has not been released yet; when it is, the driver should set both bits to enable UDP RSS for IPv4 and IPv6. Also add a spinlock around vnic_dev_capable_rss_hash_type(). Signed-off-by: Govindarajulu Varadarajan Signed-off-by: David S.
Miller --- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 18 ++++++++++++++---- drivers/net/ethernet/cisco/enic/enic_main.c | 20 ++++++++++++++------ drivers/net/ethernet/cisco/enic/enic_res.c | 7 ++++++- drivers/net/ethernet/cisco/enic/vnic_dev.c | 18 +++++++++++------- drivers/net/ethernet/cisco/enic/vnic_dev.h | 2 +- drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 20 +++++++++++++++++++- drivers/net/ethernet/cisco/enic/vnic_nic.h | 3 ++- 7 files changed, 67 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 869006c2002d..f42f7a6e1559 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -476,18 +476,28 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd) static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd) { + u8 rss_hash_type = 0; cmd->data = 0; + spin_lock_bh(&enic->devcmd_lock); + (void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type); + spin_unlock_bh(&enic->devcmd_lock); switch (cmd->flow_type) { case TCP_V6_FLOW: case TCP_V4_FLOW: - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 | + RXH_IP_SRC | RXH_IP_DST; + break; case UDP_V6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; case UDP_V4_FLOW: - if (vnic_dev_capable_udp_rss(enic->vdev)) + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 8a8b12b720ef..30d2eaa18c04 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2320,16 +2320,24 @@ static int enic_set_rss_nic_cfg(struct enic *enic) { struct device *dev = enic_get_dev(enic); const u8 rss_default_cpu = 0; - u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | - NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | - NIC_CFG_RSS_HASH_TYPE_IPV6 | - NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; const u8 rss_hash_bits = 7; const u8 rss_base_cpu = 0; + u8 rss_hash_type; + int res; u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); - if (vnic_dev_capable_udp_rss(enic->vdev)) - rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP; + spin_lock_bh(&enic->devcmd_lock); + res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type); + spin_unlock_bh(&enic->devcmd_lock); + if (res) { + /* defaults for old adapters + */ + rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | + NIC_CFG_RSS_HASH_TYPE_IPV6 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + } + if (rss_enable) { if (!enic_set_rsskey(enic)) { if (enic_set_rsscpu(enic, rss_hash_bits)) { diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c index 9c96911fb2c8..40b20817ddd5 100644 --- a/drivers/net/ethernet/cisco/enic/enic_res.c +++ b/drivers/net/ethernet/cisco/enic/enic_res.c @@ -149,6 +149,7 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, u8 ig_vlan_strip_en) { + enum vnic_devcmd_cmd cmd = CMD_NIC_CFG; u64 a0, a1; u32 nic_cfg; int wait = 1000; @@ -160,7 +161,11 @@ int 
         a0 = nic_cfg;
         a1 = 0;
 
-        return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+        if (rss_hash_type & (NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 |
+                             NIC_CFG_RSS_HASH_TYPE_UDP_IPV6))
+                cmd = CMD_NIC_CFG_CHK;
+
+        return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait);
 }
 
 int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 76cdd4c9d11f..e9db811df59c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1282,19 +1282,23 @@ int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
         return ret;
 }
 
-bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev)
+int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type)
 {
         u64 a0 = CMD_NIC_CFG, a1 = 0;
-        u64 rss_hash_type;
         int wait = 1000;
         int err;
 
         err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
-        if (err || !a0)
-                return false;
+        /* rss_hash_type is valid only when a0 is 1. Adapter which does not
+         * support CMD_CAPABILITY for rss_hash_type has a0 = 0
+         */
+        if (err || (a0 != 1))
+                return -EOPNOTSUPP;
+
+        a1 = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) &
+             NIC_CFG_RSS_HASH_TYPE_MASK_FIELD;
 
-        rss_hash_type = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) &
-                        NIC_CFG_RSS_HASH_TYPE_MASK_FIELD;
+        *rss_hash_type = (u8)a1;
 
-        return (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP);
+        return 0;
 }
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 59d4cc8fbb85..714fc1ed79e3 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -184,6 +184,6 @@ int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
                                  u16 vxlan_udp_port_number);
 int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
                                        u64 *supported_versions, u64 *a1);
-bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev);
+int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type);
 
 #endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 41de4ba622a1..fef5a0a0663d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -148,8 +148,26 @@ enum vnic_devcmd_cmd {
         /* del VLAN id in (u16)a0 */
         CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
 
-        /* nic_cfg in (u32)a0 */
+        /* nic_cfg (no wait, always succeeds)
+         * in: (u32)a0
+         *
+         * Capability query:
+         * out: (u64) a0 = 1 if a1 is valid
+         *      (u64) a1 = (NIC_CFG bits supported) | (flags << 32)
+         *
+         * flags are CMD_NIC_CFG_CAPF_xxx
+         */
         CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
 
+        /* nic_cfg_chk (will return error if flags are invalid)
+         * in: (u32)a0
+         *
+         * Capability query:
+         * out: (u64) a0 = 1 if a1 is valid
+         *      (u64) a1 = (NIC_CFG bits supported) | (flags << 32)
+         *
+         * flags are CMD_NIC_CFG_CAPF_xxx
+         */
+        CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
         /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
         CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h
index 5a93db0d7afc..84ff8ca17fcb 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_nic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h
@@ -41,13 +41,14 @@
 #define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD     1UL
 #define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT          24
 
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4          (1 << 0)
 #define NIC_CFG_RSS_HASH_TYPE_IPV4              (1 << 1)
 #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4          (1 << 2)
 #define NIC_CFG_RSS_HASH_TYPE_IPV6              (1 << 3)
 #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6          (1 << 4)
 #define NIC_CFG_RSS_HASH_TYPE_IPV6_EX           (1 << 5)
 #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX       (1 << 6)
-#define NIC_CFG_RSS_HASH_TYPE_UDP               (1 << 7)
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6          (1 << 7)
 
 static inline void vnic_set_nic_cfg(u32 *nic_cfg,
         u8 rss_default_cpu, u8 rss_hash_type,
--
cgit v1.2.3
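For reference, the capability word returned for CMD_NIC_CFG is what the new vnic_dev_capable_rss_hash_type() helper unpacks, and enic_get_rx_flow_hash() then maps the per-protocol bits onto ethtool's RXH_* flags. Below is a minimal sketch of that mapping for the UDP/IPv4 case, assuming only the NIC_CFG_RSS_HASH_TYPE_* definitions from this patch; the helper name is hypothetical, not part of the driver:

#include <linux/types.h>
#include <linux/ethtool.h>

/* Hypothetical helper: translate a vnic rss_hash_type bitmask into the
 * ethtool hash-field flags reported for a UDP/IPv4 flow, mirroring the
 * switch added to enic_get_rx_flow_hash() above.
 */
static u64 rss_bits_to_rxh_udp4(u8 rss_hash_type)
{
        u64 data = RXH_IP_SRC | RXH_IP_DST;     /* addresses are always hashed */

        if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
                data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;    /* UDP ports as well */

        return data;
}

On a running system the same information should be visible through this get path with "ethtool -n <dev> rx-flow-hash udp4".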
From 5040cc990cbac98733df4d58fdeac5bbdab15b49 Mon Sep 17 00:00:00 2001
From: Arun Parameswaran
Date: Tue, 5 Jun 2018 13:38:12 -0700
Subject: net: dsa: b53: Fix for brcm tag issue in Cygnus SoC
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In the Broadcom Cygnus SoC, the brcm tag needs to be inserted between
the MAC address and the EtherType (it should use 'DSA_PROTO_TAG_BRCM')
for packets sent to the internal b53 switch.

Since Cygnus was added with the BCM58XX device id, and the BCM58XX uses
'DSA_PROTO_TAG_BRCM_PREPEND', the data path is broken due to the
incorrect brcm tag location.

Add a new b53 device id (BCM583XX) for the Cygnus family to fix the
issue. Add the new device id to the BCM58XX family, as Cygnus is
similar to the BCM58XX in most other functionality.

Fixes: 11606039604c ("net: dsa: b53: Support prepended Broadcom tags")
Signed-off-by: Arun Parameswaran
Acked-by: Scott Branden
Reported-by: Clément Péron
Reviewed-by: Florian Fainelli
Tested-by: Clément Péron
Signed-off-by: David S. Miller
---
 drivers/net/dsa/b53/b53_common.c | 15 ++++++++++++++-
 drivers/net/dsa/b53/b53_priv.h   |  2 ++
 drivers/net/dsa/b53/b53_srab.c   |  4 ++--
 3 files changed, 18 insertions(+), 3 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index db9a80e1ee14..5e010b1592f7 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -684,7 +684,8 @@ static int b53_switch_reset(struct b53_device *dev)
          * still use this driver as a library and need to perform the reset
          * earlier.
          */
-        if (dev->chip_id == BCM58XX_DEVICE_ID) {
+        if (dev->chip_id == BCM58XX_DEVICE_ID ||
+            dev->chip_id == BCM583XX_DEVICE_ID) {
                 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
                 reg |= SW_RST | EN_SW_RST | EN_CH_RST;
                 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
@@ -1935,6 +1936,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
                 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
                 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
         },
+        {
+                .chip_id = BCM583XX_DEVICE_ID,
+                .dev_name = "BCM583xx/11360",
+                .vlans = 4096,
+                .enabled_ports = 0x103,
+                .arl_entries = 4,
+                .cpu_port = B53_CPU_PORT,
+                .vta_regs = B53_VTA_REGS,
+                .duplex_reg = B53_DUPLEX_STAT_GE,
+                .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+                .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+        },
         {
                 .chip_id = BCM7445_DEVICE_ID,
                 .dev_name = "BCM7445",
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 75d9ffb75da8..df149756c282 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -62,6 +62,7 @@ enum {
         BCM53018_DEVICE_ID = 0x53018,
         BCM53019_DEVICE_ID = 0x53019,
         BCM58XX_DEVICE_ID = 0x5800,
+        BCM583XX_DEVICE_ID = 0x58300,
         BCM7445_DEVICE_ID = 0x7445,
         BCM7278_DEVICE_ID = 0x7278,
 };
@@ -181,6 +182,7 @@ static inline int is5301x(struct b53_device *dev)
 static inline int is58xx(struct b53_device *dev)
 {
         return dev->chip_id == BCM58XX_DEVICE_ID ||
+                dev->chip_id == BCM583XX_DEVICE_ID ||
                 dev->chip_id == BCM7445_DEVICE_ID ||
                 dev->chip_id == BCM7278_DEVICE_ID;
 }
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index c37ffd1b6833..8247481eaa06 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -364,7 +364,7 @@ static const struct of_device_id b53_srab_of_match[] = {
         { .compatible = "brcm,bcm53018-srab" },
         { .compatible = "brcm,bcm53019-srab" },
         { .compatible = "brcm,bcm5301x-srab" },
-        { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM58XX_DEVICE_ID },
+        { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM583XX_DEVICE_ID },
         { .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID },
         { .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID },
         { .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID },
@@ -372,7 +372,7 @@ static const struct of_device_id b53_srab_of_match[] = {
         { .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID },
         { .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID },
         { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
-        { .compatible = "brcm,cygnus-srab", .data = (void *)BCM58XX_DEVICE_ID },
+        { .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID },
         { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
         { /* sentinel */ },
 };
--
cgit v1.2.3
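The fix works because the srab bus driver takes the chip id from the of_device_id .data field, so giving Cygnus its own id lets the rest of the driver pick the correct Broadcom tag placement for that family. A hedged sketch of how match data of this kind is typically recovered at probe time (the probe function and its use of the value are illustrative only, not the actual b53_srab code; BCM58XX_DEVICE_ID/BCM583XX_DEVICE_ID come from b53_priv.h):

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Illustrative only: an of_device_id .data value such as
 * (void *)BCM583XX_DEVICE_ID can be read back in probe and stored so
 * that per-chip helpers (e.g. is58xx(), tag-protocol selection) see
 * the right family.
 */
static int example_srab_probe(struct platform_device *pdev)
{
        u32 chip_id = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);

        if (!chip_id)
                chip_id = BCM58XX_DEVICE_ID;    /* assumed fallback for entries without .data */

        /* ... allocate the switch device and record chip_id ... */
        return 0;
}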
From dd612f18a49b63af8b3a5f572d999bdb197385bc Mon Sep 17 00:00:00 2001
From: Julia Lawall
Date: Wed, 6 Jun 2018 15:03:22 +0200
Subject: bnx2x: use the right constant

Nearby code that also tests port suggests that the P0 constant should
be used when port is zero.

The semantic match that finds this problem is as follows:
(http://coccinelle.lip6.fr/)

//
@@
expression e,e1;
@@
* e ? e1 : e1
//

Fixes: 6c3218c6f7e5 ("bnx2x: Adjust ETS to 578xx")
Signed-off-by: Julia Lawall
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 7dd83d0ef0a0..22243c480a05 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -588,7 +588,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
          * slots for the highest priority.
          */
         REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
-                   NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+                   NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
         /* Mapping between the CREDIT_WEIGHT registers and actual client
          * numbers
          */
--
cgit v1.2.3

From 1819e40908ee76c7219287224c22c772556c927e Mon Sep 17 00:00:00 2001
From: Xi Wang
Date: Wed, 6 Jun 2018 14:07:51 +0100
Subject: net: hns3: Fix for VF mailbox cannot receiving PF response

When the VF frequently switches the CMDQ interrupt, if the CMDQ_SRC is
not cleared, the VF will not receive the new PF response after the
interrupt is re-enabled. The corresponding log is as follows:

[ 317.482222] hns3 0000:00:03.0: VF could not get mbx resp(=0) from PF in 500 tries
[ 317.483137] hns3 0000:00:03.0: VF request to get tqp info from PF failed -5

This patch fixes the problem by clearing CMDQ_SRC before enabling the
interrupt and by syncing pending IRQ handlers after disabling it.

Fixes: e2cb1dec9779 ("net: hns3: Add HNS3 VF HCL(Hardware Compatibility Layer) Support")
Signed-off-by: Xi Wang
Signed-off-by: Peng Li
Signed-off-by: Salil Mehta
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index bc8a5760d959..a17872aab168 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1565,6 +1565,8 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
                 return ret;
         }
 
+        hclgevf_clear_event_cause(hdev, 0);
+
         /* enable misc. vector(vector 0) */
         hclgevf_enable_vector(&hdev->misc_vector, true);
 
@@ -1575,6 +1577,7 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
 {
         /* disable misc vector(vector 0) */
         hclgevf_enable_vector(&hdev->misc_vector, false);
+        synchronize_irq(hdev->misc_vector.vector_irq);
         free_irq(hdev->misc_vector.vector_irq, hdev);
         hclgevf_free_vector(hdev, 0);
 }
--
cgit v1.2.3
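The ordering in the uninit path is the general rule for any shared or misc vector: mask the source first, then make sure no handler is still running before the IRQ and the data it dereferences are freed. A minimal, generic sketch of that teardown ordering (the structure below is hypothetical, not the hclgevf code):

#include <linux/interrupt.h>
#include <linux/io.h>

struct example_vector {
        void __iomem *addr;     /* vector enable/mask register */
        int irq;
};

/* Generic teardown ordering: disable the vector, drain any in-flight
 * handler with synchronize_irq(), and only then free the IRQ.
 */
static void example_misc_irq_uninit(struct example_vector *vec, void *dev_data)
{
        writel(0, vec->addr);           /* mask the misc vector */
        synchronize_irq(vec->irq);      /* wait for running handlers to finish */
        free_irq(vec->irq, dev_data);
}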
From 6444e2a5f1e680278b58ced3568bdff84afe14a5 Mon Sep 17 00:00:00 2001
From: Xi Wang
Date: Wed, 6 Jun 2018 14:07:52 +0100
Subject: net: hns3: Fix for VF mailbox receiving unknown message

If the VF driver reads data from the crq before the firmware has
updated the crq's tail pointer, the data may be incomplete, which can
lead to the driver reading an unknown message.

This patch fixes it by checking whether the crq is empty before reading
the message.

Fixes: b11a0bb231f3 ("net: hns3: Add mailbox support to VF driver")
Signed-off-by: Xi Wang
Signed-off-by: Peng Li
Signed-off-by: Salil Mehta
Signed-off-by: David S. Miller
---
 .../ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 23 +++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index a28618428338..b598c06af8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -126,6 +126,13 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
         return status;
 }
 
+static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
+{
+        u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);
+
+        return tail == hw->cmq.crq.next_to_use;
+}
+
 void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 {
         struct hclgevf_mbx_resp_status *resp;
@@ -140,11 +147,22 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
         resp = &hdev->mbx_resp;
         crq = &hdev->hw.cmq.crq;
 
-        flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
-        while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+        while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
                 desc = &crq->desc[crq->next_to_use];
                 req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
+                flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+                if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+                        dev_warn(&hdev->pdev->dev,
+                                 "dropped invalid mailbox message, code = %d\n",
+                                 req->msg[0]);
+
+                        /* dropping/not processing this invalid message */
+                        crq->desc[crq->next_to_use].flag = 0;
+                        hclge_mbx_ring_ptr_move_crq(crq);
+                        continue;
+                }
+
                 /* synchronous messages are time critical and need preferential
                  * treatment. Therefore, we need to acknowledge all the sync
                  * responses as quickly as possible so that waiting tasks do not
@@ -205,7 +223,6 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                 }
                 crq->desc[crq->next_to_use].flag = 0;
                 hclge_mbx_ring_ptr_move_crq(crq);
-                flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
         }
 
         /* Write back CMDQ_RQ header pointer, M7 need this pointer */
--
cgit v1.2.3
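The underlying pattern is a single-consumer ring in which the hardware advances a tail register and the driver keeps its own next_to_use index: only descriptors strictly before the hardware tail are guaranteed to be fully written, so the consumer bounds its loop by the tail rather than by a per-descriptor valid bit that may land before the rest of the data. A self-contained sketch of that consumption loop, with illustrative types and register layout (not the hns3 definitions):

#include <linux/io.h>
#include <linux/types.h>

struct example_desc {
        u32 data[6];
};

struct example_ring {
        void __iomem *tail_reg;         /* hardware-owned tail pointer */
        u32 next_to_use;                /* driver-owned consumer index */
        u32 len;                        /* number of descriptors */
        struct example_desc *desc;
};

static bool example_ring_empty(struct example_ring *ring)
{
        /* re-read the tail each time; descriptors at or past it are not ready */
        return readl(ring->tail_reg) == ring->next_to_use;
}

static void example_ring_poll(struct example_ring *ring)
{
        while (!example_ring_empty(ring)) {
                struct example_desc *desc = &ring->desc[ring->next_to_use];

                /* desc is fully written by the hardware at this point;
                 * process it here.
                 */
                (void)desc;

                ring->next_to_use = (ring->next_to_use + 1) % ring->len;
        }
}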
From 8e52a602b5126183f7a6487c4d48f6a00af4e4fd Mon Sep 17 00:00:00 2001
From: Xi Wang
Date: Wed, 6 Jun 2018 14:07:53 +0100
Subject: net: hns3: Optimize PF CMDQ interrupt switching process

When the PF frequently switches the CMDQ interrupt, if the CMDQ_SRC is
not cleared before the hardware interrupt is generated, the new
interrupt will not be reported.

This patch addresses the problem by clearing CMDQ_SRC and RESET_STS
before enabling the interrupt and by syncing pending IRQ handlers after
disabling it.

Fixes: 466b0c00391b ("net: hns3: Add support for misc interrupt")
Signed-off-by: Xi Wang
Signed-off-by: Peng Li
Signed-off-by: Salil Mehta
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 2a801344eafb..d318d35e598f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2557,6 +2557,15 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
         }
 }
 
+static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
+{
+        hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
+                                BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
+                                BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
+                                BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
+        hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
+}
+
 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 {
         writel(enable ? 1 : 0, vector->addr);
@@ -5688,6 +5697,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
 
+        hclge_clear_all_event_cause(hdev);
+
         /* Enable MISC vector(vector0) */
         hclge_enable_vector(&hdev->misc_vector, true);
 
@@ -5817,6 +5828,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 
         /* Disable MISC vector(vector0) */
         hclge_enable_vector(&hdev->misc_vector, false);
+        synchronize_irq(hdev->misc_vector.vector_irq);
+
         hclge_destroy_cmd_queue(&hdev->hw);
         hclge_misc_irq_uninit(hdev);
         hclge_pci_uninit(hdev);
--
cgit v1.2.3
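Taken together with the VF-side patch above, the rule is symmetric on bring-up: acknowledge whatever cause bits may still be latched from a previous run before unmasking the vector, so the first real event produces an edge the hardware actually reports. A small generic sketch of that bring-up ordering, with an illustrative register layout (not the hclge definitions):

#include <linux/io.h>
#include <linux/types.h>

struct example_misc_irq {
        void __iomem *cause_reg;        /* write-1-to-clear cause bits */
        void __iomem *enable_reg;       /* vector mask/unmask */
};

/* Clear stale cause bits first, then unmask; doing it in the other order
 * can leave a latched-but-never-reported event behind.
 */
static void example_misc_irq_init(struct example_misc_irq *mi, u32 stale_bits)
{
        writel(stale_bits, mi->cause_reg);      /* ack leftovers from the last run */
        writel(1, mi->enable_reg);              /* now enable the vector */
}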